From 6e41400e0aca594ae8cf4a10feba0af21f074118 Mon Sep 17 00:00:00 2001 From: TensorFlow Lattice Authors Date: Tue, 28 Jan 2020 13:33:06 -0800 Subject: [PATCH] Internal change PiperOrigin-RevId: 292002256 Change-Id: Ib103e46dd6c9f5e86de0f707ce2842a1d62b7693 --- .bazelrc | 12 - .gitignore | 12 - BUILD | 23 - INSTALL.md | 202 -- MANIFEST.in | 6 - README.md | 52 +- WORKSPACE | 81 +- build_docs.py | 85 + build_tools/ci_build/ci_common.sh | 36 - build_tools/ci_build/macosx/py2.sh | 43 - build_tools/ci_build/macosx/py3.sh | 43 - build_tools/ci_build/ubuntu/py2.sh | 43 - build_tools/ci_build/ubuntu/py3.sh | 43 - build_tools/common.sh | 100 - build_tools/release_build/py2.sh | 42 - build_tools/release_build/py2_gpu.sh | 42 - build_tools/release_build/py3.sh | 42 - build_tools/release_build/py3_gpu.sh | 42 - build_tools/release_build/release_common.sh | 72 - docs/_book.yaml | 35 + docs/_index.yaml | 81 + {g3doc => docs}/images/2d_lattice.png | Bin docs/images/data_dist.png | Bin 0 -> 8942 bytes docs/images/favicon.ico | Bin 0 -> 941 bytes docs/images/flexible_fit.png | Bin 0 -> 21883 bytes docs/images/linear_fit.png | Bin 0 -> 20351 bytes docs/images/model_comparison.png | Bin 0 -> 21353 bytes docs/images/monotonic_fit.png | Bin 0 -> 20102 bytes docs/images/pwl_calibration_distance.png | Bin 0 -> 17141 bytes docs/images/pwl_calibration_price.png | Bin 0 -> 19095 bytes docs/images/regularized_fit.png | Bin 0 -> 21423 bytes {g3doc => docs}/images/tensorflow_lattice.png | Bin docs/install.md | 39 + docs/overview.md | 200 ++ docs/tutorials/canned_estimators.ipynb | 722 ++++++ docs/tutorials/custom_estimators.ipynb | 443 ++++ docs/tutorials/keras_layers.ipynb | 838 ++++++ docs/tutorials/shape_constraints.ipynb | 1262 +++++++++ examples/BUILD | 62 + examples/canned_estimators_uci_heart.py | 325 +++ examples/coffee_test.py | 84 - examples/custom_estimators_uci_heart.py | 170 ++ examples/estimator_test.py | 62 - examples/etl_1d.py | 310 --- examples/image_compression.py | 140 - examples/keras_functional_uci_heart.py | 314 +++ examples/keras_sequential_uci_heart.py | 275 ++ examples/lattice_test.py | 28 - examples/uci_census.py | 557 ---- g3doc/api_docs/python/_toc.yaml | 62 - g3doc/api_docs/python/index.md | 31 - g3doc/api_docs/python/tensorflow_lattice.md | 83 - .../CalibratedEtlHParams.md | 294 --- .../tensorflow_lattice/CalibratedHParams.md | 282 -- .../CalibratedLatticeHParams.md | 277 -- .../CalibratedLinearHParams.md | 260 -- .../CalibratedRtlHParams.md | 283 --- .../tensorflow_lattice/PerFeatureHParams.md | 297 --- .../calibrated_etl_classifier.md | 114 - .../calibrated_etl_regressor.md | 112 - .../calibrated_lattice_classifier.md | 107 - .../calibrated_lattice_regressor.md | 108 - .../calibrated_linear_classifier.md | 111 - .../calibrated_linear_regressor.md | 112 - .../calibrated_rtl_classifier.md | 114 - .../calibrated_rtl_regressor.md | 112 - .../tensorflow_lattice/calibration_layer.md | 96 - .../calibrator_regularization.md | 39 - .../ensemble_lattices_layer.md | 69 - .../input_calibration_layer.md | 102 - .../input_calibration_layer_from_hparams.md | 76 - .../python/tensorflow_lattice/lattice.md | 36 - .../tensorflow_lattice/lattice_layer.md | 69 - .../lattice_regularization.md | 49 - .../load_keypoints_from_quantiles.md | 57 - .../tensorflow_lattice/monotone_lattice.md | 58 - .../monotonic_projection.md | 41 - .../pwl_indexing_calibrator.md | 42 - .../save_quantiles_for_keypoints.md | 78 - .../uniform_keypoints_for_signal.md | 47 - g3doc/images/data_dist.png | Bin 654474 -> 0 bytes 
g3doc/images/deep_lattice_networks.png | Bin 162953 -> 0 bytes g3doc/images/mono_1_of_4.png | Bin 21425 -> 0 bytes g3doc/images/mono_2_of_4.png | Bin 34561 -> 0 bytes g3doc/images/mono_3_of_4.png | Bin 31254 -> 0 bytes g3doc/images/mono_4_of_4.png | Bin 36209 -> 0 bytes g3doc/images/pwl_calibration_distance.png | Bin 20211 -> 0 bytes g3doc/images/pwl_calibration_price.png | Bin 23018 -> 0 bytes g3doc/tutorial/images/2d_lattice.png | Bin 35608 -> 0 bytes g3doc/tutorial/images/data_dist.png | Bin 654474 -> 0 bytes .../tutorial/images/deep_lattice_networks.png | Bin 162953 -> 0 bytes g3doc/tutorial/images/mono_1_of_4.png | Bin 21425 -> 0 bytes g3doc/tutorial/images/mono_2_of_4.png | Bin 34561 -> 0 bytes g3doc/tutorial/images/mono_3_of_4.png | Bin 31254 -> 0 bytes g3doc/tutorial/images/mono_4_of_4.png | Bin 36209 -> 0 bytes .../images/pwl_calibration_distance.png | Bin 20211 -> 0 bytes .../tutorial/images/pwl_calibration_price.png | Bin 23018 -> 0 bytes g3doc/tutorial/index.md | 883 ------- pip_pkg.sh | 95 - setup.py | 171 +- tensorflow | 1 - tensorflow_lattice/BUILD | 48 +- tensorflow_lattice/__init__.py | 68 +- tensorflow_lattice/cc/BUILD | 204 -- tensorflow_lattice/cc/kernels/BUILD | 166 -- .../hypercube_interpolation_kernels.cc | 348 --- .../cc/kernels/lattice_interpolation_base.cc | 54 - .../cc/kernels/lattice_interpolation_base.h | 233 -- .../cc/kernels/lattice_raw_iterator.cc | 37 - .../cc/kernels/lattice_raw_iterator.h | 63 - .../cc/kernels/lattice_raw_iterator_test.cc | 111 - .../cc/kernels/monotone_lattice_kernels.cc | 184 -- .../kernels/monotonic_lattice_projections.h | 326 --- .../monotonic_lattice_projections_test.cc | 135 - .../cc/kernels/monotonic_projection_kernel.cc | 93 - .../cc/kernels/monotonic_projections.h | 161 -- .../pwl_indexing_calibrator_kernels.cc | 759 ------ .../kernels/simplex_interpolation_kernels.cc | 285 --- tensorflow_lattice/cc/lib/BUILD | 32 - .../cc/lib/lattice_structure.cc | 51 - tensorflow_lattice/cc/lib/lattice_structure.h | 159 -- .../ops/hypercube_interpolation_ops_test.cc | 87 - .../cc/ops/hypercube_interpolation_ops_test.h | 35 - .../ops/hypercube_interpolation_ops_test_p.cc | 93 - .../cc/ops/lattice_interpolation_ops.cc | 184 -- .../cc/ops/monotone_lattice_ops.cc | 89 - .../cc/ops/monotone_lattice_ops_test.cc | 187 -- .../cc/ops/monotonic_projection_op.cc | 62 - .../cc/ops/monotonic_projection_op_test.cc | 99 - .../cc/ops/pwl_indexing_calibrator_ops.cc | 190 -- .../ops/pwl_indexing_calibrator_ops_test.cc | 480 ---- .../cc/ops/simplex_interpolation_ops_test.cc | 143 -- tensorflow_lattice/cc/test_tools/test_main.cc | 23 - tensorflow_lattice/cc/tflite_ops/BUILD | 152 -- tensorflow_lattice/cc/tflite_ops/README.md | 202 -- .../cc/tflite_ops/freeze_graph_wrapper.py | 33 - tensorflow_lattice/cc/tflite_ops/helpers.h | 94 - .../hypercube_interpolation_test.cc | 221 -- .../cc/tflite_ops/interpolation.cc | 254 -- .../cc/tflite_ops/pwl_indexing_calibrator.cc | 231 -- .../pwl_indexing_calibrator_sparse_test.cc | 157 -- .../pwl_indexing_calibrator_test.cc | 143 -- .../tflite_ops/simplex_interpolation_test.cc | 118 - .../cc/tflite_ops/tflite_ops.cc | 33 - tensorflow_lattice/cc/tflite_ops/tflite_ops.h | 41 - .../cc/tflite_ops/toco_wrapper.py | 35 - .../{python/estimators => layers}/__init__.py | 14 +- tensorflow_lattice/python/BUILD | 330 ++- tensorflow_lattice/python/__init__.py | 4 +- .../python/categorical_calibration_layer.py | 310 +++ .../python/categorical_calibration_lib.py | 161 ++ .../python/categorical_calibration_test.py | 316 +++ 
tensorflow_lattice/python/configs.py | 841 ++++++ tensorflow_lattice/python/configs_test.py | 126 + tensorflow_lattice/python/estimators.py | 2264 +++++++++++++++++ tensorflow_lattice/python/estimators/BUILD | 227 -- tensorflow_lattice/python/estimators/base.py | 253 -- .../python/estimators/base_test.py | 116 - .../python/estimators/calibrated.py | 515 ---- .../python/estimators/calibrated_etl.py | 685 ----- .../python/estimators/calibrated_etl_test.py | 389 --- .../python/estimators/calibrated_lattice.py | 460 ---- .../estimators/calibrated_lattice_test.py | 406 --- .../python/estimators/calibrated_linear.py | 365 --- .../estimators/calibrated_linear_test.py | 249 -- .../python/estimators/calibrated_rtl.py | 562 ---- .../python/estimators/calibrated_rtl_test.py | 456 ---- .../python/estimators/calibrated_test.py | 113 - .../python/estimators/hparams.py | 626 ----- .../python/estimators/hparams_test.py | 182 -- .../estimators/separately_calibrated_rtl.py | 569 ----- .../separately_calibrated_rtl_test.py | 432 ---- tensorflow_lattice/python/estimators_test.py | 715 ++++++ tensorflow_lattice/python/kernel_tests/BUILD | 46 - .../kernel_tests/lattice_gradient_test.py | 222 -- .../kernel_tests/pwl_calibration_test.py | 92 - tensorflow_lattice/python/lattice_layer.py | 851 +++++++ tensorflow_lattice/python/lattice_lib.py | 2142 ++++++++++++++++ tensorflow_lattice/python/lattice_test.py | 1472 +++++++++++ tensorflow_lattice/python/lib/__init__.py | 19 - .../python/lib/keypoints_initialization.py | 504 ---- .../lib/keypoints_initialization_test.py | 480 ---- .../python/lib/lattice_layers.py | 404 --- .../python/lib/lattice_layers_test.py | 628 ----- .../python/lib/monotone_linear_layers.py | 278 -- .../python/lib/monotone_linear_layers_test.py | 353 --- .../python/lib/pwl_calibration_layers.py | 622 ----- .../python/lib/pwl_calibration_layers_test.py | 586 ----- tensorflow_lattice/python/lib/regularizers.py | 598 ----- .../python/lib/regularizers_test.py | 1021 -------- tensorflow_lattice/python/lib/test_data.py | 194 -- tensorflow_lattice/python/lib/tools.py | 421 --- tensorflow_lattice/python/lib/tools_test.py | 360 --- tensorflow_lattice/python/linear_layer.py | 331 +++ tensorflow_lattice/python/linear_lib.py | 253 ++ tensorflow_lattice/python/linear_test.py | 543 ++++ tensorflow_lattice/python/model_info.py | 109 + tensorflow_lattice/python/ops/__init__.py | 19 - tensorflow_lattice/python/ops/lattice_ops.py | 94 - .../python/ops/pwl_calibration_ops.py | 69 - .../python/parallel_combination_layer.py | 164 ++ .../python/parallel_combination_test.py | 140 + .../python/pwl_calibration_layer.py | 966 +++++++ .../python/pwl_calibration_lib.py | 998 ++++++++ .../python/pwl_calibration_test.py | 1119 ++++++++ tensorflow_lattice/python/test_utils.py | 276 ++ tensorflow_lattice/python/utils.py | 174 ++ tensorflow_lattice/python/utils_test.py | 51 + tensorflow_lattice/python/visualization.py | 524 ++++ tensorflow_lattice/tensorflow_lattice.bzl | 42 - 210 files changed, 20050 insertions(+), 27440 deletions(-) delete mode 100644 .bazelrc delete mode 100644 .gitignore delete mode 100644 BUILD delete mode 100644 INSTALL.md delete mode 100644 MANIFEST.in create mode 100644 build_docs.py delete mode 100644 build_tools/ci_build/ci_common.sh delete mode 100755 build_tools/ci_build/macosx/py2.sh delete mode 100755 build_tools/ci_build/macosx/py3.sh delete mode 100755 build_tools/ci_build/ubuntu/py2.sh delete mode 100755 build_tools/ci_build/ubuntu/py3.sh delete mode 100755 build_tools/common.sh delete mode 
100755 build_tools/release_build/py2.sh delete mode 100755 build_tools/release_build/py2_gpu.sh delete mode 100755 build_tools/release_build/py3.sh delete mode 100755 build_tools/release_build/py3_gpu.sh delete mode 100644 build_tools/release_build/release_common.sh create mode 100644 docs/_book.yaml create mode 100644 docs/_index.yaml rename {g3doc => docs}/images/2d_lattice.png (100%) create mode 100644 docs/images/data_dist.png create mode 100644 docs/images/favicon.ico create mode 100644 docs/images/flexible_fit.png create mode 100644 docs/images/linear_fit.png create mode 100644 docs/images/model_comparison.png create mode 100644 docs/images/monotonic_fit.png create mode 100644 docs/images/pwl_calibration_distance.png create mode 100644 docs/images/pwl_calibration_price.png create mode 100644 docs/images/regularized_fit.png rename {g3doc => docs}/images/tensorflow_lattice.png (100%) create mode 100644 docs/install.md create mode 100644 docs/overview.md create mode 100644 docs/tutorials/canned_estimators.ipynb create mode 100644 docs/tutorials/custom_estimators.ipynb create mode 100644 docs/tutorials/keras_layers.ipynb create mode 100644 docs/tutorials/shape_constraints.ipynb create mode 100644 examples/BUILD create mode 100644 examples/canned_estimators_uci_heart.py delete mode 100644 examples/coffee_test.py create mode 100644 examples/custom_estimators_uci_heart.py delete mode 100644 examples/estimator_test.py delete mode 100644 examples/etl_1d.py delete mode 100644 examples/image_compression.py create mode 100644 examples/keras_functional_uci_heart.py create mode 100644 examples/keras_sequential_uci_heart.py delete mode 100644 examples/lattice_test.py delete mode 100644 examples/uci_census.py delete mode 100644 g3doc/api_docs/python/_toc.yaml delete mode 100644 g3doc/api_docs/python/index.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/CalibratedEtlHParams.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/CalibratedHParams.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/CalibratedLatticeHParams.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/CalibratedLinearHParams.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/CalibratedRtlHParams.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/PerFeatureHParams.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/calibrated_etl_classifier.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/calibrated_etl_regressor.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/calibrated_lattice_classifier.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/calibrated_lattice_regressor.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/calibrated_linear_classifier.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/calibrated_linear_regressor.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/calibrated_rtl_classifier.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/calibrated_rtl_regressor.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/calibration_layer.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/calibrator_regularization.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/ensemble_lattices_layer.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/input_calibration_layer.md delete mode 100644 
g3doc/api_docs/python/tensorflow_lattice/input_calibration_layer_from_hparams.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/lattice.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/lattice_layer.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/lattice_regularization.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/load_keypoints_from_quantiles.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/monotone_lattice.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/monotonic_projection.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/pwl_indexing_calibrator.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/save_quantiles_for_keypoints.md delete mode 100644 g3doc/api_docs/python/tensorflow_lattice/uniform_keypoints_for_signal.md delete mode 100644 g3doc/images/data_dist.png delete mode 100644 g3doc/images/deep_lattice_networks.png delete mode 100644 g3doc/images/mono_1_of_4.png delete mode 100644 g3doc/images/mono_2_of_4.png delete mode 100644 g3doc/images/mono_3_of_4.png delete mode 100644 g3doc/images/mono_4_of_4.png delete mode 100644 g3doc/images/pwl_calibration_distance.png delete mode 100644 g3doc/images/pwl_calibration_price.png delete mode 100644 g3doc/tutorial/images/2d_lattice.png delete mode 100644 g3doc/tutorial/images/data_dist.png delete mode 100644 g3doc/tutorial/images/deep_lattice_networks.png delete mode 100644 g3doc/tutorial/images/mono_1_of_4.png delete mode 100644 g3doc/tutorial/images/mono_2_of_4.png delete mode 100644 g3doc/tutorial/images/mono_3_of_4.png delete mode 100644 g3doc/tutorial/images/mono_4_of_4.png delete mode 100644 g3doc/tutorial/images/pwl_calibration_distance.png delete mode 100644 g3doc/tutorial/images/pwl_calibration_price.png delete mode 100644 g3doc/tutorial/index.md delete mode 100755 pip_pkg.sh delete mode 160000 tensorflow delete mode 100644 tensorflow_lattice/cc/BUILD delete mode 100644 tensorflow_lattice/cc/kernels/BUILD delete mode 100644 tensorflow_lattice/cc/kernels/hypercube_interpolation_kernels.cc delete mode 100644 tensorflow_lattice/cc/kernels/lattice_interpolation_base.cc delete mode 100644 tensorflow_lattice/cc/kernels/lattice_interpolation_base.h delete mode 100644 tensorflow_lattice/cc/kernels/lattice_raw_iterator.cc delete mode 100644 tensorflow_lattice/cc/kernels/lattice_raw_iterator.h delete mode 100644 tensorflow_lattice/cc/kernels/lattice_raw_iterator_test.cc delete mode 100644 tensorflow_lattice/cc/kernels/monotone_lattice_kernels.cc delete mode 100644 tensorflow_lattice/cc/kernels/monotonic_lattice_projections.h delete mode 100644 tensorflow_lattice/cc/kernels/monotonic_lattice_projections_test.cc delete mode 100644 tensorflow_lattice/cc/kernels/monotonic_projection_kernel.cc delete mode 100644 tensorflow_lattice/cc/kernels/monotonic_projections.h delete mode 100644 tensorflow_lattice/cc/kernels/pwl_indexing_calibrator_kernels.cc delete mode 100644 tensorflow_lattice/cc/kernels/simplex_interpolation_kernels.cc delete mode 100644 tensorflow_lattice/cc/lib/BUILD delete mode 100644 tensorflow_lattice/cc/lib/lattice_structure.cc delete mode 100644 tensorflow_lattice/cc/lib/lattice_structure.h delete mode 100644 tensorflow_lattice/cc/ops/hypercube_interpolation_ops_test.cc delete mode 100644 tensorflow_lattice/cc/ops/hypercube_interpolation_ops_test.h delete mode 100644 tensorflow_lattice/cc/ops/hypercube_interpolation_ops_test_p.cc delete mode 100644 tensorflow_lattice/cc/ops/lattice_interpolation_ops.cc delete mode 100644 
tensorflow_lattice/cc/ops/monotone_lattice_ops.cc delete mode 100644 tensorflow_lattice/cc/ops/monotone_lattice_ops_test.cc delete mode 100644 tensorflow_lattice/cc/ops/monotonic_projection_op.cc delete mode 100644 tensorflow_lattice/cc/ops/monotonic_projection_op_test.cc delete mode 100644 tensorflow_lattice/cc/ops/pwl_indexing_calibrator_ops.cc delete mode 100644 tensorflow_lattice/cc/ops/pwl_indexing_calibrator_ops_test.cc delete mode 100644 tensorflow_lattice/cc/ops/simplex_interpolation_ops_test.cc delete mode 100644 tensorflow_lattice/cc/test_tools/test_main.cc delete mode 100644 tensorflow_lattice/cc/tflite_ops/BUILD delete mode 100644 tensorflow_lattice/cc/tflite_ops/README.md delete mode 100644 tensorflow_lattice/cc/tflite_ops/freeze_graph_wrapper.py delete mode 100644 tensorflow_lattice/cc/tflite_ops/helpers.h delete mode 100644 tensorflow_lattice/cc/tflite_ops/hypercube_interpolation_test.cc delete mode 100644 tensorflow_lattice/cc/tflite_ops/interpolation.cc delete mode 100644 tensorflow_lattice/cc/tflite_ops/pwl_indexing_calibrator.cc delete mode 100644 tensorflow_lattice/cc/tflite_ops/pwl_indexing_calibrator_sparse_test.cc delete mode 100644 tensorflow_lattice/cc/tflite_ops/pwl_indexing_calibrator_test.cc delete mode 100644 tensorflow_lattice/cc/tflite_ops/simplex_interpolation_test.cc delete mode 100644 tensorflow_lattice/cc/tflite_ops/tflite_ops.cc delete mode 100644 tensorflow_lattice/cc/tflite_ops/tflite_ops.h delete mode 100644 tensorflow_lattice/cc/tflite_ops/toco_wrapper.py rename tensorflow_lattice/{python/estimators => layers}/__init__.py (55%) create mode 100644 tensorflow_lattice/python/categorical_calibration_layer.py create mode 100644 tensorflow_lattice/python/categorical_calibration_lib.py create mode 100644 tensorflow_lattice/python/categorical_calibration_test.py create mode 100644 tensorflow_lattice/python/configs.py create mode 100644 tensorflow_lattice/python/configs_test.py create mode 100644 tensorflow_lattice/python/estimators.py delete mode 100644 tensorflow_lattice/python/estimators/BUILD delete mode 100644 tensorflow_lattice/python/estimators/base.py delete mode 100644 tensorflow_lattice/python/estimators/base_test.py delete mode 100644 tensorflow_lattice/python/estimators/calibrated.py delete mode 100644 tensorflow_lattice/python/estimators/calibrated_etl.py delete mode 100644 tensorflow_lattice/python/estimators/calibrated_etl_test.py delete mode 100644 tensorflow_lattice/python/estimators/calibrated_lattice.py delete mode 100644 tensorflow_lattice/python/estimators/calibrated_lattice_test.py delete mode 100644 tensorflow_lattice/python/estimators/calibrated_linear.py delete mode 100644 tensorflow_lattice/python/estimators/calibrated_linear_test.py delete mode 100644 tensorflow_lattice/python/estimators/calibrated_rtl.py delete mode 100644 tensorflow_lattice/python/estimators/calibrated_rtl_test.py delete mode 100644 tensorflow_lattice/python/estimators/calibrated_test.py delete mode 100644 tensorflow_lattice/python/estimators/hparams.py delete mode 100644 tensorflow_lattice/python/estimators/hparams_test.py delete mode 100644 tensorflow_lattice/python/estimators/separately_calibrated_rtl.py delete mode 100644 tensorflow_lattice/python/estimators/separately_calibrated_rtl_test.py create mode 100644 tensorflow_lattice/python/estimators_test.py delete mode 100644 tensorflow_lattice/python/kernel_tests/BUILD delete mode 100644 tensorflow_lattice/python/kernel_tests/lattice_gradient_test.py delete mode 100644 
tensorflow_lattice/python/kernel_tests/pwl_calibration_test.py create mode 100644 tensorflow_lattice/python/lattice_layer.py create mode 100644 tensorflow_lattice/python/lattice_lib.py create mode 100644 tensorflow_lattice/python/lattice_test.py delete mode 100644 tensorflow_lattice/python/lib/__init__.py delete mode 100644 tensorflow_lattice/python/lib/keypoints_initialization.py delete mode 100644 tensorflow_lattice/python/lib/keypoints_initialization_test.py delete mode 100644 tensorflow_lattice/python/lib/lattice_layers.py delete mode 100644 tensorflow_lattice/python/lib/lattice_layers_test.py delete mode 100644 tensorflow_lattice/python/lib/monotone_linear_layers.py delete mode 100644 tensorflow_lattice/python/lib/monotone_linear_layers_test.py delete mode 100644 tensorflow_lattice/python/lib/pwl_calibration_layers.py delete mode 100644 tensorflow_lattice/python/lib/pwl_calibration_layers_test.py delete mode 100644 tensorflow_lattice/python/lib/regularizers.py delete mode 100644 tensorflow_lattice/python/lib/regularizers_test.py delete mode 100644 tensorflow_lattice/python/lib/test_data.py delete mode 100644 tensorflow_lattice/python/lib/tools.py delete mode 100644 tensorflow_lattice/python/lib/tools_test.py create mode 100644 tensorflow_lattice/python/linear_layer.py create mode 100644 tensorflow_lattice/python/linear_lib.py create mode 100644 tensorflow_lattice/python/linear_test.py create mode 100644 tensorflow_lattice/python/model_info.py delete mode 100644 tensorflow_lattice/python/ops/__init__.py delete mode 100644 tensorflow_lattice/python/ops/lattice_ops.py delete mode 100644 tensorflow_lattice/python/ops/pwl_calibration_ops.py create mode 100644 tensorflow_lattice/python/parallel_combination_layer.py create mode 100644 tensorflow_lattice/python/parallel_combination_test.py create mode 100644 tensorflow_lattice/python/pwl_calibration_layer.py create mode 100644 tensorflow_lattice/python/pwl_calibration_lib.py create mode 100644 tensorflow_lattice/python/pwl_calibration_test.py create mode 100644 tensorflow_lattice/python/test_utils.py create mode 100644 tensorflow_lattice/python/utils.py create mode 100644 tensorflow_lattice/python/utils_test.py create mode 100644 tensorflow_lattice/python/visualization.py delete mode 100644 tensorflow_lattice/tensorflow_lattice.bzl diff --git a/.bazelrc b/.bazelrc deleted file mode 100644 index 785a5d6..0000000 --- a/.bazelrc +++ /dev/null @@ -1,12 +0,0 @@ -# Import TensorFlow configuration. -import %workspace%/tensorflow/.tf_configure.bazelrc - -# Coloring for error messages. -common --color=yes - -# Always print test errors. -test --test_output=errors - -# Other build flags. -build --define=grpc_no_ares=true -test --define=grpc_no_ares=true diff --git a/.gitignore b/.gitignore deleted file mode 100644 index c37242c..0000000 --- a/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -.DS_Store -.ipynb_checkpoints -node_modules -/.bazelrc -/bazel-* -/bazel_pip -/pip_test -/_python_build -*.pyc -__pycache__ -*.swp -.vscode/ diff --git a/BUILD b/BUILD deleted file mode 100644 index b5e7ca5..0000000 --- a/BUILD +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================= -sh_binary( - name = "pip_pkg", - srcs = ["pip_pkg.sh"], - data = [ - "MANIFEST.in", - "setup.py", - "//tensorflow_lattice", - ], -) diff --git a/INSTALL.md b/INSTALL.md deleted file mode 100644 index f8c7cb2..0000000 --- a/INSTALL.md +++ /dev/null @@ -1,202 +0,0 @@ - -# TensorFlow Lattice installation - -TensorFlow Lattice runs on Ubuntu and Mac OS X, and requires TensorFlow. - -We highly recommend to read [TensorFlow installation -instructions](https://www.tensorflow.org/install), especially [Installing -TensorFlow on Ubuntu](https://www.tensorflow.org/install/install_linux) to -understand virtualenv and pip, and [Installing TensorFlow from -Sources](https://www.tensorflow.org/install/install_sources). - -# Install the prebuilt pip package - -## Activate virtualenv -If using virtualenv, activate your virtualenv for the rest of the installation, -otherwise skip this step: - -``` shell -~$ virtualenv --system-site-packages tensorflow-lattice # for Python 2.7 -~$ virtualenv --system-site-packages -p python3 tensorflow-lattice # for Python 3.n -``` - -Here you can change `tensorflow-lattice` to another target directory you want to -use. - -```shell -~$ source tensorflow-lattice/bin/activate # bash, sh, ksh, or zsh -~$ source tensorflow-lattice/bin/activate.csh # csh or tcsh -``` - -## Install pip packages. -You can use pip install to install tensorflow-lattice pip package. - -```shell -(tensorflow-lattice)$ pip install --upgrade tensorflow-lattice # for Python 2.7 -(tensorflow-lattice)$ pip3 install --upgrade tensorflow-lattice # for Python 3.n -(tensorflow-lattice)$ pip install --upgrade tensorflow-lattice-gpu # for Python 2.7 and GPU -(tensorflow-lattice)$ pip3 install --upgrade tensorflow-lattice-gpu # for Python 3.n and GPU -``` -Our custom operators do not have GPU kernels. The main difference -between `tensorflow-lattice-gpu` and `tensorflow-lattice` pip package is that -the former requires `tensorflow-gpu` pip package whereas the latter requires -`tensorflow` pip package. - -## Test TensorFlow and TensorFlow Lattice - -Run the following python script to test TensorFlow Lattice. - -```python -import tensorflow as tf -import tensorflow_lattice as tfl - -x = tf.compat.v1.placeholder(tf.float32, shape=(None, 2)) -(y, _, _, _) = tfl.lattice_layer(x, lattice_sizes=(2, 2)) - -with tf.Session() as sess: - sess.run(tf.global_variables_initializer()) - print(sess.run(y, feed_dict={x: [[0.0, 0.0]]})) -``` - -Now you are ready to use *TensorFlow Lattice*. Check out examples in the -[examples](https://github.com/tensorflow/lattice/tree/master/examples) directory -and run them if you need more examples to run. -[Tutorial](g3doc/tutorial/index.md) contains detailed explanation on how to use -TensorFlow Lattice. - -You can stop here unless you want to build TensorFlow Lattice from the source. - -# Build TensorFlow Lattice and TensorFlow pip package from the source. -You can also build TensorFlow Lattice packages from the source. 
-For this, you will need to compile all libraries using -[Bazel](https://bazel.build) against TensorFlow headers. - - -We will show how to build TensorFlow and TensorFlow Lattice pip package using -Bazel, and install it to your virtualenv. - -## Activate virtualenv - -If using virtualenv, activate your virtualenv for the rest of the installation, -otherwise skip this step: - -```shell -~$ source $VIRTUALENV_PATH/bin/activate # bash, sh, ksh, or zsh -~$ source $VIRTUALENV_PATH/bin/activate.csh # csh or tcsh -``` - -or if you are using virtualenv for the first time, - -```shell -~$ sudo apt-get install python-virtualenv -~$ virtualenv --system-site-packages tensorflow-lattice -~$ source ~/tensorflow-lattice/bin/activate # bash, sh, ksh, or zsh -~$ source ~/tensorflow-lattice/bin/activate.csh # csh or tcsh -``` -## Prepare TensorFlow envirnoment for Linux. - -Please follow instructions in [Prepare environment for -Linux](https://www.tensorflow.org/install/install_sources#prepare_environment_for_linux) -to setup the environment for TensorFlow. - -## Clone the TensorFlow Lattice repository. - -Let us clone the TensorFlow Lattice repository, which contains TensorFlow as a -submodule: - -```shell -(tensorflow-lattice)~$ git clone --recursive https://github.com/tensorflow/lattice.git -``` - -## Configure TensorFlow and build TensorFlow pip package. - -### Configure TensorFlow - -We now need to configure TensorFlow options. See [Configure the -installation](https://www.tensorflow.org/install/install_sources#configure_the_installation) -for the details. - -```shell -(tensorflow-lattice)~$ cd lattice -(tensorflow-lattice)~/lattice$ cd tensorflow -(tensorflow-lattice)~/lattice/tensorflow$ ./configure -``` - -### Build TensorFlow pip packaging script - -We are ready to build the TensorFlow pip package. See [Build the pip -package](https://www.tensorflow.org/install/install_sources#build_the_pip_package) -for the details. - -To build a pip package for TensorFlow with CPU-only support: - -```shell -(tensorflow-lattice)~/lattice/tensorflow$ bazel build \ - --config=opt \ - tensorflow/tools/pip_package:build_pip_package -``` - -To build a pip package for TensorFlow with GPU support: - -```shell -(tensorflow-lattice)~/lattice/tensorflow$ bazel build \ - --config=cuda \ - tensorflow/tools/pip_package:build_pip_package -``` - -### Install TensorFlow pip package - -```shell -(tensorflow-lattice)~/lattice/tensorflow$ bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg -(tensorflow-lattice)~/lattice/tensorflow$ pip install /tmp/tensorflow_pkg/*.whl -``` - -### Build TensorFlow Lattice pip packaging script - -To build a pip package for TensorFlow with CPU-only support: - -```shell -(tensorflow-lattice)~/$ cd ~/lattice -(tensorflow-lattice)~/lattice$ bazel build \ - --config=opt :pip_pkg -``` - -### Install TensorFlow Lattice pip package - -```shell -(tensorflow-lattice)~/lattice$ bazel-bin/pip_pkg /tmp/tensorflow_lattice_pkg -(tensorflow-lattice)~/lattice$ pip install /tmp/tensorflow_lattice_pkg/*.whl -``` - -### Test TensorFlow and TensorFlow Lattice -```shell -(tensorflow-lattice)~/lattice$ cd examples -(tensorflow-lattice)~/lattice/examples$ python test.py -``` - -test.py is a simple python script. 
- -```python -import tensorflow as tf -import tensorflow_lattice as tfl - -x = tf.compat.v1.placeholder(tf.float32, shape=(None, 2)) -(y, _, _, _) = tfl.lattice_layer(x, lattice_sizes=(2, 2)) - -with tf.Session() as sess: - sess.run(tf.global_variables_initializer()) - print(sess.run(y, feed_dict={x: [[0.0, 0.0]]})) -``` diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 3b46ccd..0000000 --- a/MANIFEST.in +++ /dev/null @@ -1,6 +0,0 @@ -include README.md LICENSE BUILD -recursive-include tensorflow_lattice BUILD -graft tensorflow_lattice/cc -recursive-exclude tensorflow_lattice/cc *_test.cc *.so -recursive-exclude tensorflow_lattice/cc/test_tools * - diff --git a/README.md b/README.md index 230915f..71e848e 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ - -
 # TensorFlow Lattice
-This is an implementation of [Monotonic Calibrated Interpolated Look-Up Tables](http://jmlr.org/papers/v17/15-243.html) in [TensorFlow](https://www.tensorflow.org).
+TensorFlow Lattice is a library that implements constrained and interpretable
+lattice-based models. It is an implementation of
+[Monotonic Calibrated Interpolated Look-Up Tables](http://jmlr.org/papers/v17/15-243.html)
+in [TensorFlow](https://www.tensorflow.org).
-These are fast-to-evaluate and interpretable lattice models, also known as
-interpolated look-up tables. This library also provides a rich and intuitive set
-of regularizations and monotonicity constraints configurable per feature.
+The library enables you to inject domain knowledge into
+the learning process through common-sense or policy-driven shape constraints.
+This is done using a collection of Keras layers that can satisfy constraints
+such as monotonicity, convexity and pairwise trust:
-It includes
-[__TensorFlow estimators__](https://www.tensorflow.org/extend/estimators) for
-regression and classification with the most common set ups for lattice models:
+* PWLCalibration: piecewise linear calibration of signals.
+* CategoricalCalibration: mapping of categorical inputs into real values.
+* Lattice: interpolated look-up table implementation.
+* Linear: linear function with monotonicity and norm constraints.
+
+The library also provides easy-to-set-up canned estimators for common use cases:
 * Calibrated Linear
 * Calibrated Lattice
-* Random Tiny Lattices (_RTL_)
-* Embedded Tiny Lattices (_ETL_) (see [Deep Lattice Networks and Partial Monotonic Functions](https://research.google.com/pubs/pub46327.html))
-
-Additionally this library provides two types of __model components__
-(or __layers__) that can be combined with other types of models (including
-neural networks):
+* Random Tiny Lattices (RTL)
+* Crystals
-* Calibration: piecewise linear calibration of signals.
-* Lattice: interpolated look-up table implementation.
+With TF Lattice you can use domain knowledge to better extrapolate to the parts
+of the input space not covered by the training dataset. This helps avoid
+unexpected model behaviour when the serving distribution is different from the
+training distribution.
+
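The canned estimators above are driven by the new `tfl.configs` and `tfl.estimators` modules added elsewhere in this patch. A minimal sketch of a calibrated-lattice canned classifier, assuming the API demonstrated in `docs/tutorials/canned_estimators.ipynb`; the feature name, dataset, and keypoint count are illustrative, not from the patch:

```python
import tensorflow as tf
import tensorflow_lattice as tfl


def train_input_fn():
  # Tiny in-memory dataset, purely for illustration.
  features = {'age': [20.0, 28.0, 35.0, 42.0, 50.0, 57.0, 65.0, 72.0]}
  labels = [0, 0, 0, 1, 0, 1, 1, 1]
  return tf.data.Dataset.from_tensor_slices((features, labels)).batch(4)


# One monotonic, piecewise-linearly calibrated feature feeding a lattice.
model_config = tfl.configs.CalibratedLatticeConfig(
    feature_configs=[
        tfl.configs.FeatureConfig(
            name='age',
            lattice_size=2,
            monotonicity='increasing',
            pwl_calibration_num_keypoints=4),
    ])

estimator = tfl.estimators.CannedClassifier(
    feature_columns=[tf.feature_column.numeric_column('age')],
    model_config=model_config,
    # Run once before training to pick calibration keypoints from quantiles.
    feature_analysis_input_fn=train_input_fn,
    optimizer=tf.keras.optimizers.Adam(0.01))

estimator.train(input_fn=train_input_fn)
```

The monotonicity constraint guarantees the learned score is non-decreasing in `age` over the whole input domain, not just on the training points.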
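For the Keras layers, here is a quick eager-mode smoke test, a 2.0-style analogue of the `tfl.lattice_layer` snippet removed from INSTALL.md; it assumes TF 2.x and the pip package installed as shown below:

```python
import tensorflow as tf
import tensorflow_lattice as tfl

# A 2x2 lattice over two inputs; interpolates its corner values eagerly.
lattice = tfl.layers.Lattice(
    lattice_sizes=[2, 2], output_min=0.0, output_max=1.0)
print(lattice(tf.constant([[0.0, 0.0], [0.5, 1.0]])))
```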
You can install our prebuilt pip package using ```bash pip install tensorflow-lattice ``` - -but please see the [install](INSTALL.md) section for more detailed instructions. - -This [tutorial](g3doc/tutorial/index.md) contains more detailed explanation -about lattice models and usage in TensorFlow, and check out -[API docs](g3doc/api_docs/python/index.md) for python APIs. - -__TensorFlow Lattice is not an official Google product.__ diff --git a/WORKSPACE b/WORKSPACE index 0f4283f..06761c4 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -1,77 +1,16 @@ -# Copyright 2017 The TensorFlow Lattice Authors. +# Copyright 2018 The TensorFlow Lattice Authors. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. # ============================================================================== -workspace(name = "tensorflow_lattice") - -local_repository( - name = "org_tensorflow", - path = "tensorflow", -) - -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file") - -# This rule is from TensorFlow's WORKSPACE. -http_archive( - name = "io_bazel_rules_closure", - sha256 = "e0a111000aeed2051f29fcc7a3f83be3ad8c6c93c186e64beb1ad313f0c7f9f9", - strip_prefix = "rules_closure-cf1e44edb908e9616030cc83d085989b8e6cd6df", - urls = [ - "http://mirror.tensorflow.org/github.com/bazelbuild/rules_closure/archive/cf1e44edb908e9616030cc83d085989b8e6cd6df.tar.gz", - "https://github.com/bazelbuild/rules_closure/archive/cf1e44edb908e9616030cc83d085989b8e6cd6df.tar.gz", # 2019-04-04 - ], -) -# Apple and Swift rules. 
-http_archive( - name = "build_bazel_rules_apple", - sha256 = "23792cd999f97fc97284d1c44cb1324bfdd0bc54aa68ad513fa3705aca3b1f9e", - urls = ["https://github.com/bazelbuild/rules_apple/releases/download/0.15.0/rules_apple.0.15.0.tar.gz"], -) # https://github.com/bazelbuild/rules_apple/releases -http_archive( - name = "build_bazel_apple_support", - sha256 = "7356dbd44dea71570a929d1d4731e870622151a5f27164d966dda97305f33471", - urls = ["https://github.com/bazelbuild/apple_support/releases/download/0.6.0/apple_support.0.6.0.tar.gz"], -) # https://github.com/bazelbuild/apple_support/releases -http_archive( - name = "bazel_skylib", - sha256 = "2ef429f5d7ce7111263289644d233707dba35e39696377ebab8b0bc701f7818e", - urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/0.8.0/bazel-skylib.0.8.0.tar.gz"], -) # https://github.com/bazelbuild/bazel-skylib/releases -http_archive( - name = "build_bazel_rules_swift", - sha256 = "9efe9699e9765e6b4a5e063e4a08f6b163cccaf0443f775d935baf5c3cd6ed0e", - urls = ["https://github.com/bazelbuild/rules_swift/releases/download/0.9.0/rules_swift.0.9.0.tar.gz"], -) # https://github.com/bazelbuild/rules_swift/releases -http_archive( - name = "com_github_apple_swift_swift_protobuf", - type = "zip", - strip_prefix = "swift-protobuf-1.5.0/", - urls = ["https://github.com/apple/swift-protobuf/archive/1.5.0.zip"], -) # https://github.com/apple/swift-protobuf/releases -http_file( - name = "xctestrunner", - executable = 1, - urls = ["https://github.com/google/xctestrunner/releases/download/0.2.7/ios_test_runner.par"], -) # https://github.com/google/xctestrunner/releases -# Use `swift_rules_dependencies` to fetch the toolchains. With the -# `git_repository` rules above, the following call will skip redefining them. -load("@build_bazel_rules_swift//swift:repositories.bzl", "swift_rules_dependencies") -swift_rules_dependencies() - -load("@org_tensorflow//tensorflow:workspace.bzl", "tf_workspace") - -tf_workspace( - path_prefix = "", - tf_repo_name = "org_tensorflow", -) +workspace(name = "tensorflow_lattice") diff --git a/build_docs.py b/build_docs.py new file mode 100644 index 0000000..74c317b --- /dev/null +++ b/build_docs.py @@ -0,0 +1,85 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Generate docs API for TF Lattice. 
+ +Example run: + +``` +python build_docs.py --output_dir=/path/to/output +``` +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys + +from absl import app +from absl import flags + +from tensorflow_docs.api_generator import generate_lib +from tensorflow_docs.api_generator import public_api + +import tensorflow_lattice as tfl + +flags.DEFINE_string('output_dir', '/tmp/tfl_api/', + 'The path to output the files to') + +flags.DEFINE_string( + 'code_url_prefix', + 'https://github.com/tensorflow/lattice/blob/master/tensorflow_lattice', + 'The url prefix for links to code.') + +flags.DEFINE_bool('search_hints', True, + 'Include metadata search hints in the generated files') + +flags.DEFINE_string('site_path', 'lattice/api_docs/python', + 'Path prefix in the _toc.yaml') + +FLAGS = flags.FLAGS + + +def local_definitions_filter(path, parent, children): + """Filters local imports, except for the tfl.layers module.""" + if path == ('tfl', 'layers'): + return children + return public_api.local_definitions_filter(path, parent, children) + + +def main(_): + private_map = { + 'tfl': ['python'], + 'tfl.categorical_calibration_layer': ['CategoricalCalibration'], + 'tfl.lattice_layer': ['Lattice'], + 'tfl.linear_layer': ['Linear'], + 'tfl.pwl_calibration_layer': ['PWLCalibration'], + 'tfl.parallel_combination_layer': ['ParallelCombination'] + } + doc_generator = generate_lib.DocGenerator( + root_title='TensorFlow Lattice 2.0', + py_modules=[('tfl', tfl)], + base_dir=os.path.dirname(tfl.__file__), + code_url_prefix=FLAGS.code_url_prefix, + search_hints=FLAGS.search_hints, + site_path=FLAGS.site_path, + private_map=private_map, + callbacks=[local_definitions_filter]) + + sys.exit(doc_generator.build(output_dir=FLAGS.output_dir)) + + +if __name__ == '__main__': + app.run(main) diff --git a/build_tools/ci_build/ci_common.sh b/build_tools/ci_build/ci_common.sh deleted file mode 100644 index d368c68..0000000 --- a/build_tools/ci_build/ci_common.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# Run tensorflow lattice bazel tests. -function tensorflow_lattice_test { - # Cleaning up bazel workspace - bazel clean - - if [[ "${IS_MAC}" == true ]]; then - N_JOBS=$(sysctl -n hw.ncpu) - else - N_JOBS=$(grep -c ^processor /proc/cpuinfo) - fi - - echo "" - echo "Bazel will use ${N_JOBS} concurrent job(s)." - echo "" - - bazel test --config=opt --test_tag_filters=-gpu -k \ - --jobs=${N_JOBS} --test_timeout 300,450,1200,3600 --build_tests_only \ - --test_output=errors -- \ - //tensorflow_lattice/... 
-} diff --git a/build_tools/ci_build/macosx/py2.sh b/build_tools/ci_build/macosx/py2.sh deleted file mode 100755 index b36846b..0000000 --- a/build_tools/ci_build/macosx/py2.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# This script will run the bash function tensorflow_lattice_test under a python2 -# environment. - -set -e -set -x - -# Source common scripts. -source "build_tools/common.sh" - -export IS_MAC=true -export TFL_PY="py2" -export TFL_USE_GPU=false - -# Prepare build. -prepare_build - -# Source common ci scripts. -source "build_tools/ci_build/ci_common.sh" - -# Activate virtualenv. -source ${TFL_ENV_PATH}/bin/activate - -echo "Running all tests." -tensorflow_lattice_test -echo "Done with testing." - -deactivate diff --git a/build_tools/ci_build/macosx/py3.sh b/build_tools/ci_build/macosx/py3.sh deleted file mode 100755 index 14bccc8..0000000 --- a/build_tools/ci_build/macosx/py3.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# This script will run the bash function tensorflow_lattice_test under a python3 -# environment. - -set -e -set -x - -# Source common scripts. -source "build_tools/common.sh" - -export IS_MAC=true -export TFL_PY="py3" -export TFL_USE_GPU=false - -# Prepare build. -prepare_build - -# Source common ci scripts. -source "build_tools/ci_build/ci_common.sh" - -# Activate virtualenv. -source ${TFL_ENV_PATH}/bin/activate - -echo "Running all tests." -tensorflow_lattice_test -echo "Done with testing." - -deactivate diff --git a/build_tools/ci_build/ubuntu/py2.sh b/build_tools/ci_build/ubuntu/py2.sh deleted file mode 100755 index bd014b1..0000000 --- a/build_tools/ci_build/ubuntu/py2.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# This script will run the bash function tensorflow_lattice_test under a python2 -# environment. - -set -e -set -x - -# Source common scripts. -source "build_tools/common.sh" - -export IS_MAC=false -export TFL_PY="py2" -export TFL_USE_GPU=false - -# Prepare build. -prepare_build - -# Source common ci scripts. -source "build_tools/ci_build/ci_common.sh" - -# Activate virtualenv. -source ${TFL_ENV_PATH}/bin/activate - -echo "Running all tests." -tensorflow_lattice_test -echo "Done with testing." - -deactivate diff --git a/build_tools/ci_build/ubuntu/py3.sh b/build_tools/ci_build/ubuntu/py3.sh deleted file mode 100755 index 9e223ef..0000000 --- a/build_tools/ci_build/ubuntu/py3.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# This script will run the bash function tensorflow_lattice_test under a python3 -# environment. - -set -e -set -x - -# Source common scripts. -source "build_tools/common.sh" - -export IS_MAC=false -export TFL_PY="py3" -export TFL_USE_GPU=false - -# Prepare build. -prepare_build - -# Source common ci scripts. -source "build_tools/ci_build/ci_common.sh" - -# Activate virtualenv. -source ${TFL_ENV_PATH}/bin/activate - -echo "Running all tests." -tensorflow_lattice_test -echo "Done with testing." - -deactivate diff --git a/build_tools/common.sh b/build_tools/common.sh deleted file mode 100755 index e512180..0000000 --- a/build_tools/common.sh +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# Git initialization -function git_init { - # Run configure. - export TF_NEED_GCP=0 - export TF_NEED_HDFS=0 - export PYTHON_BIN_PATH=$(which python) - - # Initialize git. 
- git init - - if [ -d "tensorflow" ]; then - echo "TensorFlow submodule exist. Checkout r1.14" - cd tensorflow - git checkout r1.14 - cd - - else - echo "Add TensorFlow r1.14 submodule." - git submodule add -b r1.14 https://github.com/tensorflow/tensorflow.git - fi - - # Fetch all submodules. - git submodule update --init --recursive - - # Configure tensorflow. - cd tensorflow - git show --oneline -s - yes "" | ./configure - - cd - - echo "Applying visibility change." - sed -i.bak -e 's/:internal/\/\/visibility:public/g' -- "tensorflow/tensorflow/BUILD" -} - -# Create virtualenv. -function create_virtualenv { - if [ "${TFL_PY}" = "py3" ]; then - echo "Setting up python 3 virtualenv" - export TFL_ENV_PATH=${TFL_ROOT}/tensorflow-lattice-env-py3 - virtualenv --system-site-packages -p python3 ${TFL_ENV_PATH} - else - echo "Setting up python 2 virtualenv" - export TFL_ENV_PATH=${TFL_ROOT}/tensorflow-lattice-env-py2 - virtualenv --system-site-packages -p python2.7 ${TFL_ENV_PATH} - fi - source ${TFL_ENV_PATH}/bin/activate - python -V - pip install --upgrade pip - pip install --upgrade six numpy wheel enum34 protobuf keras_applications keras_preprocessing tensorflow_estimator - deactivate -} - -# Prepare all necessary environment for bazel build & testing. -function prepare_build { - # modify default gcc on linux - if [ "$(uname)" == "Linux" ]; then - sudo update-alternatives --set gcc /usr/bin/gcc-4.8 - fi - - # If TFL_ROOT does not exist, create one in here. - if [ -z "${TFL_ROOT}" ]; then - echo "TFL_ROOT is empty, so set to /tmp/tfl_root." - export TFL_ROOT="/tmp/tfl_root" - fi - - # Create virtualenv. - create_virtualenv - - # Activate virtualenv. - source ${TFL_ENV_PATH}/bin/activate - - if [ "${TFL_USE_GPU}" = true ]; then - echo "GPU build -- Enable CUDA" - export TF_NEED_CUDA=1 - else - echo "CPU build -- No CUDA" - export TF_NEED_CUDA=0 - fi - - echo "Initialize git repo." - git_init - echo "Initialization is done." - - deactivate -} diff --git a/build_tools/release_build/py2.sh b/build_tools/release_build/py2.sh deleted file mode 100755 index 74de1dc..0000000 --- a/build_tools/release_build/py2.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -set -e -set -x - -# Source common scripts. -source "build_tools/common.sh" - -export TFL_PY="py2" -export TFL_USE_GPU=false - -# Prepare build. -prepare_build - -# Source common release scripts. -source "build_tools/release_build/release_common.sh" - -# Activate virtualenv. -source ${TFL_ENV_PATH}/bin/activate - -echo "Build pip package." -build_pip_pkg -echo "Done." - -echo "Install pip package and test." -install_pip_and_test -echo "Done." 
- -deactivate diff --git a/build_tools/release_build/py2_gpu.sh b/build_tools/release_build/py2_gpu.sh deleted file mode 100755 index 2731300..0000000 --- a/build_tools/release_build/py2_gpu.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -set -e -set -x - -# Source common scripts. -source "build_tools/common.sh" - -export TFL_PY="py2" -export TFL_USE_GPU=true - -# Prepare build. -prepare_build - -# Source common release scripts. -source "build_tools/release_build/release_common.sh" - -# Activate virtualenv. -source ${TFL_ENV_PATH}/bin/activate - -echo "Build pip package." -build_pip_pkg -echo "Done." - -echo "Install pip package and test." -install_pip_and_test -echo "Done." - -deactivate diff --git a/build_tools/release_build/py3.sh b/build_tools/release_build/py3.sh deleted file mode 100755 index 89a2f04..0000000 --- a/build_tools/release_build/py3.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -set -e -set -x - -# Source common scripts. -source "build_tools/common.sh" - -export TFL_PY="py3" -export TFL_USE_GPU=false - -# Prepare build. -prepare_build - -# Source common release scripts. -source "build_tools/release_build/release_common.sh" - -# Activate virtualenv. -source ${TFL_ENV_PATH}/bin/activate - -echo "Build pip package." -build_pip_pkg -echo "Done." - -echo "Install pip package and test." -install_pip_and_test -echo "Done." - -deactivate diff --git a/build_tools/release_build/py3_gpu.sh b/build_tools/release_build/py3_gpu.sh deleted file mode 100755 index a84defc..0000000 --- a/build_tools/release_build/py3_gpu.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -set -e -set -x - -# Source common scripts. -source "build_tools/common.sh" - -export TFL_PY="py3" -export TFL_USE_GPU=true - -# Prepare build. -prepare_build - -# Source common release scripts. -source "build_tools/release_build/release_common.sh" - -# Activate virtualenv. -source ${TFL_ENV_PATH}/bin/activate - -echo "Build pip package." -build_pip_pkg -echo "Done." - -echo "Install pip package and test." -install_pip_and_test -echo "Done." - -deactivate diff --git a/build_tools/release_build/release_common.sh b/build_tools/release_build/release_common.sh deleted file mode 100644 index c27b783..0000000 --- a/build_tools/release_build/release_common.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -function build_pip_pkg { - # Clean up bazel workspace - bazel clean - - if [ "${TFL_NATIVE}" = true ]; then - # Build pip install package. - bazel build \ - --define framework_shared_object=true \ - --copt="-D_GLIBCXX_USE_CXX11_ABI=0" \ - --compilation_mode=opt \ - --distinct_host_configuration=false \ - :pip_pkg - else - bazel build \ - --define framework_shared_object=true \ - --copt="-D_GLIBCXX_USE_CXX11_ABI=0" \ - --compilation_mode=opt \ - --cpu=k8 \ - --distinct_host_configuration=false \ - :pip_pkg - fi - - if [ -z "${TFL_ARTIFACTS_DIR}" ]; then - echo "TFL_ARTIFACTS_DIR is empty, so set tp /tmp/tfl_artifacts" - export TFL_ARTIFACTS_DIR="/tmp/tfl_artifacts" - fi - - # Create wheel to artifacts dir. - if [ "${TFL_USE_GPU}" = true ]; then - echo 'Building pip package for gpu' - ./bazel-bin/pip_pkg ${TFL_ARTIFACTS_DIR} --gpu - else - echo 'Building pip package for cpu' - ./bazel-bin/pip_pkg ${TFL_ARTIFACTS_DIR} - fi -} - -function install_pip_and_test { - # Check python version. - python -V - - # Install TensorFlow Lattice - pip install --upgrade ${TFL_ARTIFACTS_DIR}/*.whl - - # Run the example script to check whether it works or not. 
- cd examples - - # Check TensorFlow version - python -c 'import tensorflow as tf; print(tf.__version__)' - - echo 'running lattice example' - python lattice_test.py - echo 'running coffee example' - python coffee_test.py - echo 'running estimator example' - python estimator_test.py -} diff --git a/docs/_book.yaml b/docs/_book.yaml new file mode 100644 index 0000000..09c62f4 --- /dev/null +++ b/docs/_book.yaml @@ -0,0 +1,35 @@ +upper_tabs: +# Tabs left of dropdown menu +- include: /_upper_tabs_left.yaml +- include: /api_docs/_upper_tabs_api.yaml +# Dropdown menu +- name: Resources + path: /resources + is_default: true + menu: + - include: /resources/_menu_toc.yaml + lower_tabs: # Subsite tabs other: - name: Guide & Tutorials + contents: + - title: Overview + path: /lattice/overview + - title: Install + path: /lattice/install + - heading: TensorFlow Lattice Tutorials + - title: Shape Constraints Tutorial + path: /lattice/tutorials/shape_constraints + - title: Keras Layers Tutorial + path: /lattice/tutorials/keras_layers + - title: Canned Estimators Tutorial + path: /lattice/tutorials/canned_estimators + - title: Custom Estimators Tutorial + path: /lattice/tutorials/custom_estimators + + - name: API + skip_translation: true + contents: + - include: /lattice/api_docs/python/_toc.yaml + +- include: /_upper_tabs_right.yaml diff --git a/docs/_index.yaml b/docs/_index.yaml new file mode 100644 index 0000000..e2ff8e9 --- /dev/null +++ b/docs/_index.yaml @@ -0,0 +1,81 @@ +book_path: /lattice/_book.yaml +project_path: /lattice/_project.yaml +description: +landing_page: + custom_css_path: /site-assets/css/style.css + rows: + - heading: Flexible, controlled and interpretable ML with lattice-based models + items: + - classname: devsite-landing-row-50 + description: >

TensorFlow Lattice is a library that implements constrained and interpretable lattice-based + models. The library enables you to inject domain knowledge into the learning process + through common-sense or policy-driven + shape constraints. This is done using a + collection of Keras layers that can satisfy + constraints such as monotonicity and convexity, and can control how features interact. The library also + provides easy-to-set-up canned estimators.

+

With TF Lattice you can use domain knowledge to better extrapolate to the parts of the + input space not covered by the training dataset. This helps avoid unexpected model behaviour + when the serving distribution is different from the training distribution.

+
+ +
+ + code_block: | +
+        import numpy as np
+        import tensorflow as tf
+        import tensorflow_lattice as tfl
+
+        model = tf.keras.models.Sequential()
+        model.add(
+            tfl.layers.ParallelCombination([
+                # Monotonic piecewise linear calibration with bounded output
+                tfl.layers.PWLCalibration(
+                    monotonicity='increasing',
+                    input_keypoints=np.linspace(1., 5., num=20),
+                    output_min=0.0,
+                    output_max=1.0),
+                # Diminishing returns
+                tfl.layers.PWLCalibration(
+                    monotonicity='increasing',
+                    convexity='concave',
+                    input_keypoints=np.linspace(0., 200., num=20),
+                    output_min=0.0,
+                    output_max=2.0),
+                # Partially monotonic categorical calibration: calib(0) <= calib(1)
+                tfl.layers.CategoricalCalibration(
+                    num_buckets=4,
+                    output_min=0.0,
+                    output_max=1.0,
+                    monotonicities=[(0, 1)]),
+            ]))
+        model.add(
+            tfl.layers.Lattice(
+                lattice_size=[2, 3, 2],
+                monotonicities='increasing',
+                # Trust: model is more responsive to input 0 if input 1 increases
+                edgeworth_trusts=(0, 1, 'positive')))
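+        # Note: ParallelCombination concatenates the three calibrator outputs,
+        # which feed the 3-input lattice above; lattice_size=[2, 3, 2] sets the
+        # number of lattice vertices along each of the three input dimensions.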
+        model.compile(...)
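+        # compile(...) is elided in the original snippet. A hypothetical
+        # completion for a regression task (loss and optimizer are chosen
+        # here purely for illustration):
+        #   model.compile(loss='mse',
+        #                 optimizer=tf.keras.optimizers.Adam(learning_rate=0.01))
+        #   model.fit(train_x, train_y)  # train_x / train_y assumed available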
+        
+ - classname: devsite-landing-row-cards + items: + - heading: "TensorFlow Lattice: Flexibility empowered by prior knowledge" + image_path: /resources/images/tf-logo-card-16x9.png + path: https://ai.googleblog.com/2017/10/tensorflow-lattice-flexibility.html + buttons: + - label: "Read on the Google AI blog" + path: https://ai.googleblog.com/2017/10/tensorflow-lattice-flexibility.html + - heading: "TensorFlow Lattice: Control your ML with monotonicity" + youtube_id: ABBnNjbjv2Q + buttons: + - label: Watch the video + path: https://www.youtube.com/watch?v=ABBnNjbjv2Q + - heading: "TF Lattice on GitHub" + image_path: /resources/images/github-card-16x9.png + path: https://github.com/tensorflow/lattice + buttons: + - label: "View on GitHub" + path: https://github.com/tensorflow/lattice diff --git a/g3doc/images/2d_lattice.png b/docs/images/2d_lattice.png similarity index 100% rename from g3doc/images/2d_lattice.png rename to docs/images/2d_lattice.png diff --git a/docs/images/data_dist.png b/docs/images/data_dist.png new file mode 100644 index 0000000000000000000000000000000000000000..63eceaba6a25493bf14ecfeb7286dd67a4917a6c GIT binary patch literal 8942
[binary image data omitted]
literal 0 HcmV?d00001
diff --git a/docs/images/favicon.ico b/docs/images/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..41c37e45dca8353b9846b690e9fc312794936d77 GIT binary patch literal 941
[binary image data omitted]
literal 0 HcmV?d00001
diff --git a/docs/images/flexible_fit.png b/docs/images/flexible_fit.png new file mode 100644 index 0000000000000000000000000000000000000000..1957eb536b0b98633abe565a5c2888f298373bed GIT binary patch literal 21883
[binary image data omitted]
literal 0 HcmV?d00001
diff --git a/docs/images/linear_fit.png b/docs/images/linear_fit.png new file mode 100644 index 0000000000000000000000000000000000000000..ad032f0f3191750024be10bad5e3ec8f72beecf1 GIT binary patch literal 20351
[binary image data omitted]

nX)%Nl<2vq1&$LK(;mhYqV?CP>S^n5M8G>Ob|2og?`<7|1`=TL=?Qqq> zmyyY`of*XV&uMCNrw^xEs8+8l066{_O5}bx?N;uz{2*1(1!pMlAtr*9octOG#{h6K z!IiWYCXq6BkntLHp)Kq@_hRq09aY$Ad*NY zxTM^r@45w@Kj-hcZ#+8>B^dsmD$d{>(um7njt0r_^XF*5q&+gf0JII%lB{f|a^nXp z=w(wL8maV9OPlN#-0hXE{FTewaNI1>6TaIq)E?NfJkcfG-suS97I#e2CmkDETpW8{3>Qhk{QYJ}X;(z%##PsCd9I&o@=EnHE@kkMi zQ(8)Lva*_5b_G(Kyn5*Yg!fUywhS8GZw`F(sbDl})rR{mIOU|6Y`^&KS8 zwIn#@*v0^sHyAs%BFPjxafbH8+G-7Ty?tW1%R?=->$!)q{TsrJ3kT|uUym-Cf84-9 zdJO|R-uXp`%oon+??W7UF?l^x)~vg+1B+Gq8gXi;YV5jz)wjDv9+1}ZaiL+L1N)uO z06odohO*zzw+gi4EXpxb(9Z=$Ka9`%5rvCT#zcAW0Z%gD>9(|I^}^te+r4sy=j$qs;G9@>*x3o8o=;8apS-7U9*v za5GZb+OzB&cSe3s<=dH>8z@SA4;u6bg~)#vR4!kus;lQ?ABu?U>Cqw^F`ym9Xed|7 zOs|67C9|A#sfXFjVi{$|R+H}rs(}z)h|I#-?2`AEhRrJ%!~R9fJLgQ~)Y0VG13eZD zHhO(ae=wVXEJ_x1pzi=j2I`yvR4RxvP$fZDX99BZY2O0?NjpJ@ZNEJ$o@Vkl*J37M z0!=nm4dv=A&;paZX{Gl^QzZN#|8gTAvKPea6@8Gz;D#%pF#j?u<~|-j?D@@>Gqsqy z*!ebR{l=)3uML(=FrjU8-X5ivJCmAqWX}EtY8vjtV1e&ycDqkD4~J(;R|n_j=Z$x~ z`nCU37!8k(Vuc!@3(deU?axQaD9#$`dC1O1MJIkj5@N~%E_7P z_xZh;!0&f1RBiXY^sqJ~gC6+)nT7og3yq3Ww%~8;pC>ej`#P5fX_)Z$I+Xp)pQQ3f zi&A;i-XItAsHv*azSFh5Tv&GEh?m=OWa!#Je_U6;A{>28P!6S6uTWBeCmqUv6km5{ zp@44mxd#pv&vyy(surk>qoB_Mx>#Hl>`B%m`Yv%9qdZp;QvcPiGCxc}T(|1*?osK- z^HlBAmpTz3^n82$=o+I~%ZEJL0FzeDmdL0mr0N3)AVZMfBU(UdkIQ?7M#P%}c#=E5 z$3JiOqQ)=1(T|8!s*;-jCai~PRy(*6`jXV}mlzHfeJ~QXv{S8WzGX3@iHTAtpZdO%0SMPL>Yk^ZK!mKtfCX@U`Jd9nK4!D~zLKS9VZPpMMxAF8uxK zB#=^bI`}dI%C`<^#Jc3?=XZ2;WH?oA1Lh$Er54DKfqx7+KRa0fLAMmh2k3l{jC#%K z-Dgo~1f4_W^a9ep4z=LoOBp@M$GRpd#ccUMKpg^ps4%#8J4lBQ%r(~dRN>2M`Qx2wH9d#b6;?XBaf zb0Zz*uH_)ZhA$!B_oTpD1PU?gb6G4>hR%lw@|*YzBw5rS=;}lhGx_?UV)o4FgQm7G zPYNA=ABaRgF%Na>&ku|#P&@d1MP9ckh9OTrpeU}lR{yqCQNxMpw={mzs}1h6ysyQX zi-HoeD21H&D1LZJy7{T=>Z&)geTc6Te)n>4Z1blGwf)JYfrd`2hI!B{>7d$606YFr z58E-XqQEN=Cb&kH zw04G~SsmQ{l$!0Cp&f{YywiTzvo?onLN`i7P$-2c?v0YyOjrri8#@D(oH7Vb8UL>s zvja`KM>wgR%*vrh^-4r@Go`b8R}m?KCMg1Hmc)r6Xu1*^$ZjR*w;0CJc=V|7^JpIK zI)T{8dHve91&nmCWk-xe1}ph1U8cs(n1%6PX`1>8EHR$DY@->`zME{@SNPjQw!Zx| zpgZcDj`rtv>Evb7_{6YB2b$Bj7xY{l8`fb5@<9^MAbHr+YSI8+!A?Do_UGp z+gz<=+acy6A0i0P=B29zCRxpGutRkJ^O)DA4`@on*1O$;qy60u4b1Q3(M5$%#Szv# ztlH7&6j@Iy?Td&}ypsz~Lj_^980Dj~j>r#*r@ zI4=1R3xedLl1(xS)$yB`SWU_(A|ogS<_veo2}Gw~wGByO*fnvXN^yJt?jzi;g`z^j z)HLS-H5-YVCDo<6Jfs%cR4v4n=VP7Moe_OGmDjCQZ<@}6M@Q!!neU(cBHHMdkq@w} z&Y$Yts8#tME*M6$Us6avvbB*P`)ROr<+mOWo>Ru4To+~py@usv5&zhZE zMI(586L;H{EAc_8a#m*5C6|&7w@dF?2*)ZvDYI{g1*$*ul)E?iBA+!SKvx{dH?Q@K zZ;##W0k=4RX*z1_3{OaBigZy5RgVkPN~&<(>$fD(II zd0RQp;=S_O6mwj5W1w`sM3^f&UfnalsVnlI?w#F!{QiSZ)3hXNDXm0meO^7eX0D-< zxkBUkkc_z<^j$hT1VDN=ryp$9-I?{)!kKv!eYFpz?##Bg#_Hv0g2v2^{O%t(!2Kj& zm-TQ{)O>BX)%_?EG~EFv#wFSXIfiRuX!ueElP0WXERN>Kld@crU?w*#jYjWQxO{mzHWJ>VaO*5aEzIs$CRfBQ{-jI`%+uq(Dm^3eV zhV`~Msgx%W?8hdHZb0`7x9>0vdl8sEyz*wqHov)_BZ7Ut|N&8-Zp)VYg29DKBWqKwn3Z^y+ zvatb_FVFpY1`<;1sjIuud2~kcZf*G9a&%kZRKiS7Nc!F~&1j zg>fo=Om26@j*4s_v;REr^5++Xt!vohs#qB#WQnm{0~-6g6v`fgGXE?0NjxrN-@nS! 
z{}{E%?5pZ)W(<8jM#VL(GVLxwk)}3y=F$rPktO2hPB(aPpWoKN+QM8 zP+OZUY;C3gM_X{o!s?(sRM+|W`A8W)v>Q5qB})8%&ks$_%#0yx`}F0Dgo6VolBRIW!KYIr|qnIeK~3n_;DcvWDu_*yw0()%1RC zlg;eR0)$!wJfovPn|J05A8dqGWe?M9Y9~gY{urTJP_4>$p7VHIiV7 zYy12en^a1?KwaW$3fiBgPk-bEfRnAF{qRDCZ->^qFh9|Si3(bgzLTZ}Zdc`S?PnnKO$|ig!3hP zG9_rtfQK=GRxrTBC2CrS)1}+X%Zs&W?a@(DK&H8KFlA?QaX(min?Lf!`%==O}))?7he`rWk;)>eF|ptXUQsO| zJwHEppAWl<#Bn}$b!Aspl1gL}Lh6k8zdIc{3*>>8Ckx4%&>Y+P2+d$b)YMknwI8f7 zqq|Jx3&xQBk4R0%R@0KMre-Jzu2$FGjxrmqPrW2k|3^2_A2)>5mZ$K47tZVGe;VR_ zwm(*_!9h9S^N&#dM64ypLfd>VsS&*1e{UsvvLlGx*?-{Sfc-{NsBW5Ovk+J{h#DwvHc0Od!NkF3*DjRq<4m9jqIMkqp@H(7G zfQBl`@7%&;IwTqnVYRUG8(@+RGUc&$ZD4O{*E{&?P`=Y^MLTjbiPhBS z?AS$+gX85)ja^1Z_k>XjsiIcp*BSSPNWR&%wFoGCsjBo-8V6i<9)gza(k8x z*3rOEAA#co1LzKazqx9?W!ebpv&za!WJeKjUZA6lKt9E#(pwQ4lO);FkpUq81mh@s z@|hTpHXwsxRg_e{2#7RgBgNr_Jd9sJfDxM1b*imfVdpsiGE#tk5s+Hqg2WTLAP{92 z?|FZ~eR`p@8pM3B6B10IWFliX55cQVBQH@mc?wnwI+9VvnPUA@waEKdTLHV(dbG5f zQ@)YYW0Bkmnt#xMMIzgXUritAjs~aJ8{3&qCq+f!D|u*Frm_xnTG#ZX@I|MK`iZcD zaR7EY<-;BXZtWOE7f4%LbQEX=`CM}U^3d2jgPN*jGRL~qc=?pCIU;>PT@)=3Pe$G!2@4)4F6FnmnG61^&8wd^|Z7stn}bRA8Kn8F~sU?YX?B!yIX;riQJ8f z;(90Ka0?1Df!7e%zD_&{s!L}H3DmQUi&Gh=Z?6^=%`RU9pA&39&Wk**rHt?YKE&le zPIzhIhtS({nZ3c#3aCs57dLtB|G+fVuah!FEKGiBX(u$R2SGP7BODK)=5@YW^bUal zf|C>Zi{J}Di+sAs?$-+Ejc_lY!n+KlIG|Gx0)!nrI#MhaR|EtED7egULEQngSKfjs^aCPK76858#(6j6I6ACOu{_uW zp#8zH0~@=F3JW>lgoHa<@6q6_f%ea_H<2BPh!zkpZckdhV+5uK^nUrp#UJ4n1OmpA z;r$C6v>fH;{dB-y(z`${lRf{_!cfTuOb(T#$%8W zaN10A!`Cc)5>&NgGzzfp$vjr}Kfm5^_w+;_I)WTUIrN<%M+EbU>_M;W;{$&NV&Pf1 zKSmms>-~4;z6^vVh6Mh1wi8$sn3NLOLrp9}LQSK0On}AGvMTUhYIJWoeR~CpAYzM*&MPE{}wXP0u+mv-CBqoN!askth z;shcU&>1W1=u9mL_?_)PR!q8!4yI)9<|YFq9VEsoj~?a7EzHknRjoRJj1Tru&$03ps!NCTwYuA*xfY0mQLP0@6 z_(R~d*oc+AF4&7{Yis*bJ_n}w;1XmpMX`XIg@$4^c7fb2ibfy+xmiJ?{pZY&XO}Qt zPsa5jmLa*0rjyNfB!LBCY&L?u3Gv3#?a{)IA8+>sTm~NVQ96f!07yy&*;5Hx*y7)C zBuDa+_A#8;AZ22bJ!29r^b6}jhDam?F}1CoJ`$HAz5R`~)$aoX2vN~yy50L_!hh`j z!2^7JdXX1!5JQh4Xhk_d^9bVJxo>q&$iqC~G#LZ}1nGd7-ah-iyF3WzWI)I=futcc zDykC{BTBB+cb{NEqJ>Kbt-;W`78)M@^%xr;fAb&}R?;)N@no|ejtmk&QdQjv>?!L- z(V)qeV2U+b8OS6Ppl|)azn}VCj3WxVZQ(2uC~2~)tI4#qwA|rv3Qp^B4y3?&%N!Jm z4tpyq$j)B~!3k`75?Wf607D`Wb8~adw{Eo}50Yv2^F!w5(9r^-U?d^<5|moNuzUad ze3R*_W(zM*`Eb*xTLc)HxfBTCg}7&dqlzbw1pmnSBQw7fgZkb zS@FSxPS{b(1_lK5^z>;9(2s_xG=%iH@Yt0a&;OqQdjW+0{r~OVv5LY#6a~-+A;D4@ zP_WaGAFxU%C`cMBKfn)2VVlk}q*T%i=`8K+En{IJDXhiDAJ8;+i)F)xT~h4o=q~q^ z$&ksUxD4|k?8G-2Qp5oyA}B_h&5%UK zK6>x{zW3gLJjZq1!5N%i?7j9{bIm!Ik*X?>@vy0|ArJ`O(E>zTVga!? 
zadWbBbhCS9%HUz);`++bfkA+qms^07!P?EuNt}o0e|~}6(Zz}<1E=T{1i}D$Dlenq zmA;(;)z@&pzA>`Gv<#$txTD>Pa~C@*=N00&LsF`deL!kKfvt!uYy-R6|aOhk4y^8yDNk&JeJ5G-v zf4(b=P|#H9J|Hobwpbs%v{;7(l1$SliND>C{bpWAgq*V~?#FrhY^4kxxH+6U?CIv~V}}*A7FVm$Vi8mAO-=aO4Q4|*3|X zyl~=k+23GQz?;=6X~yPx$HvkU2boT;!^#a+3uP4vv8J;*;Q7PJv=GW2s zddvR16v)enXAJOKmshizt9M|sK_Iv--`Z>>QeNf>AWQ@Qtb9c{>=}lBfd+Pg#`MA< zUi6;uWx7vrK+AS9>=SZ9MU1uQw*E zm6FV%Z|UPwx>gnq`G0%jp#8+|cz$au)@CieoEk@la4`&Z$Nu~j+BP9~L-)j@5mUQ& zlh#h2Fr;V!JQjx-c_f3eb&8n}hiY8(pNJ7&1fLg)ktbZ&(xCmjk&aBrdnz8>vRV&i z&#xXh@xTR_4n=hkX!BjhI!Z!;YzT?#bC$UOd5sY88hXq6ABxbD9i5Z+inIN~HWo6L`pKbQ^`l<3K|}tJ!jCDY?es&7-%*VU+bl3wI=0P`A>#g9 z>w~MQ3IQp}S)$KF$y3Ap#{1-{PwF&wj5&fB^ie>FS9eY(K27dXGqmNr_wr>~1@z3# ziiYD3gCpXfozSigVsJYXHh!jvQpJVXsP;T_)AN!*{cmO6Lv5qGUKJ_TN%ek_SV|`P z`%TvH-q$rfpCxj9#lbFxB42NmL09}PjZ4A0k#J;2@0S<-_mP|Z9gdZIM|bGZcuV@G z@pY8AABItAJ$k~$nY%aKUgm!y3p}ajJE&rnv`2q@cyfJ{cOkVqYyMZGDO#WW7UZe* zrgkm}w0-uO7)9;_-o*D!AtUgHmeqGB^13}3_L@7{=rh`_Qk9u}4Kh=X5)2O1bzj!p>+R5*pT5`^dr z;!b$A99qSJN-T2D&U_v_Gm$I3$!U0>p~qXT_cMKimY2=nCnuX7t_|x{S*z8G(zPp2wl+59p1X$T=H?FzG)!k|oojEdFDm>lJck+Z%4;g2KYGgy`0SRBLYH33{&wl+bu%f+^{jO(wM!-eQkc zAH8{s(eBbkL19Mr#a%q(sr|hYEnEKC$w_Pe<2A!XD?2wmnz!mMbQF5b-MT55gGvy7|OV8PPL zN{&78E(=Q+FApPQ>lW#b9D!8Gvt({Y2?=VD=;1Z??J<-BeGahVV%(80&$|!TNBCS< z`-DgCWH75P#66++o1(3bTL_}<*+HJcfvxQrQCFzM_Ph{#B6d?#6U%|GpP!n+Y+Zn@ zjt)gvJbgS08s;x!251mFL<8m1r%$!~3=1@J3VRN-UubHQHZ(Tsm0a|2l+A+|s`c2W z+SuHzueO>=v>1ysH8sr|wtkb4V3v}cloVE5TdTF1mz!&XXz-4Ri3v$dqfX*{859+T zZEk6)Tg{`FK$s{0;sn0+Q&&U8nlk`Y%p$|<$XOP%McEv1@d|7v_ydYYxkeG5rO9kajYya8-oBZSxtLJqGP($6`NmF zBQ|ubd{#$jyy^c6O%9p7?!mBKhmfY8z5wVq#vmHC6B>G!V-8$2+spbW%P# zjXZh+oH^bB`c&Gl86x7$#^fsZ_MFVEteDu@d(5x*m%4|`jHHV^S9f>g9h=Vb#x4yT zePnX^4TXHXyo}q!@FvSmg2&*PoSxll?Couh6&Lz^5QecXw5iDv_{V>1Yt07s$$mF| z_sDbUSd#lH5J;&(eV9GGX5s8tX7}%vUVHAbmmkjCr-Ef#vD;ggMGC1@b8M=nYPQ$y zpZ}Yz_1r@*E)NDjef>&2lBX=^=O@LY`~m%&eq17lwrOV+sW9mV_HiG{kC}TB1e%L| z+ePrvAe$Ll65|AW2{V73f3vau5t$KmTWF@io22>roI@o;JoM*J^@j6}B4Ki30Tyq4 zD*9xG2z=8qQU^6PB0(y=yG%@crX5)Amzxu1?X$DAjEsyPBwJX=P4-XihUt^}EE7c3 z()T_7xdBa5yFryT(eduwoXvH-^tJU^7JoyzXKB7;;Cn;^=Ca>)+a47;ahCcud9e{9{a2m7r28KL*6LQO*^|Dbq{NUg)~h7Wz-QPS8CKu~;$76cG5O5HN$L}vD-dC*Ci2JHrZBqBuFxdUac{Z~IiCYF=GsYVc z$3M`qhJD`B((3F$t{OsCVij|cWmYWXLGE;XM#RE-A_=h<< z4fFWFc*#^#(PU$^01~)9@+EEAF-@VDxK~b~pz1&hL%i)L(SlBLdVCm}cW7t3#Dr8# zGOXlZTxbuX&1E^F-*c-^aOD*&o%`X_Siac#-BpjoiT`S1LaI^ker@9nh{ES>Hz5u#^n<_7O1wcD#01w238KX~ZyNQ$Zo z_lLHld+j*oe=tGoNZspr%8D^j@oj4AZ5|$;d-v~~4StlO5Ord}rx7)FWwQcXthDL4 z1%$sk_<| zkI*V|f8xW`(o5UU6hqL9Z&0{4^ow?7R`Y+oshOD>2tN!6g}58*gVTAm<&_l^kSqbP zDKAjfexsO+-%GA+|5j`5J2iXU;STnnt|?lop+u2Q@}N&3OdG0QW*AE;U}=1^GyA4@ zWwORTO*u3@kx#8@&m}oat3*yl6OI3w4I%l@YDGe@Z)9NKFmT8y{8f~cIzXWTcMk=b zno=EamiAj3I?4GtB_@mX`xh$6j*+BTk?nGc{^p^*eTm6*Z!b<{WTYzVozrgVYZH-g zaf@sEY6l4H?$R0APhE8<<+R(gFaN$<_==N}_X=sgMxoY$t*z}oD8!Q*`5dBk9_soL zs3r)0NMNB_^oz4ZEx&POTFo!fOdnP0H`iv(mNFCjU22 zkLa{}Hl4l|k1w8^?(eYvx7*mYy-8ehHa6^#`SYXA6e*~1Pj7F9NjoMI1q+k9VBFzn z>yoD&;StQjlOYUDL^%O@?TL~SI%I3A^42GX*fs*`%RsPDW*|&FNVl*|3o`^unw=xM z+LJl5#;UIh%TgL7Q2(^(rUZ^fsHFgaD&_4OeR%&q1iITu4jBT~9l#Gb*u{wTab~_Y z>g$i^>7I`=Fs&Rks=t-~_w~i{_p-jelEU`WApl_CV`et#ilKxAQj0i5Y>XN5W#|;h zw=7$4o-arJsAu|H_wjLR0c2o#aK6T*lrBmHpnUgyYcL9A&i{f-A&%zN3r6jnv!bTv zWlhGoT=Cg6HBC(nX=&;9%ljX_Pz?_D|&90`GGt zj>(i*h<=2Es!`L#M3@Gv{ts-NKQbxMupzcX$@1JqVBy!ybXSRiT$&y(r>Cd$ z*B866+P99)SL1~lB{~(T{HeH|ot>hdJ2aL9>5(BJ=pCJ%sl^kMlM$~*@=O3OIR&5v znZ7bIW_OM|z0~g?AG8NtL(z0G(|Jl;V|y^=Pd_Bh5&d_+<2?s~c}#S43J(gxp$hh@ z)%=2jRuIQ;-@Zl3RgZLD>RJHL&<&19g~zsTf;-G%HVRZK$&(q^FmJgqT)ujP43+S% 
zE(NS0NVP6rhKd#cWSq(ufwN~8{eqBoR+$(+-Y#*ek~jRg1lo%e)aS$F_?F40*Ygh( z-Y!#9Q7I`f|Fo%o^W>g^I3|>i#H?5|nG$p`E=sF;oi0i?)zjs%N!_CeKGEMPRMgbU z?2$Uqk2HQtRIy#W_J7(G(8gqlKTj%btfF*+*-e=q{~CCgk%3pB5tWic(cS=-h(^Mr zc&zS=N`~w%qS1L!>I8}ABjV$`FLs+9y>uqhzkcPHJQ{sLh>jwjfsc%v-GG}5?u+Sn zPyPJxP)_z%^is&_16M;O0K`vbakS#N_<0z;Z6}DqY1K|Qmw=BK=X9{Dj1@Fgz+srE zXu*Gq2RS`I }c8>^crGwOcJ^muO@HfP%Np3UoI){_rCkyyCmpH&WiPp8S1wc6K?SVq_Gtv$Io@4o~yA zr@TFjIFYyO9{mmdNlD|Zq})1gxNWg(_SHi2)k*ftr%!JuaT{QRm<|aEVGtFi@b~xk zIvjqCHwG<{`P1W69R&Q_<7}?4|Jmw~(4E&xEbTps9BNuxVL0U6Iy2!q2?zWI=p+|Z z^rF}AUs!(CFGg(30Bq>kOXwio?-$ z7!sa#5F7NIQ2tj1g%hZ)e)(fIpnGNz6dX05isosG;4|wozB)hBskYIqmr!TDlOo~C ztp& z!t`crdmKltiI=ar>2xzLh|V!v;U+KIb!2x#kVHY`3e^kx!_09Sn)-qL^V;tu*Tc|* zVBz4j8{M317uAQLv~){o@nx$vt*`@ zyEP1sO>U2O)6;1x+v#)~+DZxwzv|rkIXBll7R;Mt-_wt-2dRB&=Klc6`E&-t8_a6O zKAbm2VN{&Fz*YZn^NezSg~evKh7bycVfIm#zy!*tA;rvWII0T8-CpJbF7vk#Rcj+14vRb<5ke*W7)8cQo+evYL>uRS_50V}RQfU8XAD z^?LmknpF?!?Vv!bu4DOT$Q4Hu6>WP_xQm+FFG+%-^n%7TIb@^)=9o5OMmcEg#rZ?F zk<75PYMTcC)56Z61k<*u>H6FM-jn0)lXPyQJF86HMqMZJBh>I$q3piCbuHN{qN z$M}XQ@mspp@d7Xr?}Olm*s7^#3!pKr_F;;b{2ifeUKb_sQRCq*7eq&h4?AGelL(Nd z`4ek>xfJ~7 zSnm=N)QpVc&(D33P zf`AhIfh%d$Gcz+ts9UTD!w@He4$81E$Uwr$8=UF|chn0+jfl(Tp@<-c`6*|q;qK?shNZf_d-0z+@{2vUAbX|L z2vdNJ|KPEmI`_@J!GLQLKo!*fu<($VM;|XUa(uzqCi!tkz4+tw#NPPJ$ewQsWI#da z3A|{%ML+*3{z03-pro0)!+kzJqF;lVurwMI#gHVbpNcTPj zQpBxkBJ^QoU)Rte+0oq%`>?yS6IUm`k6WHIW|KW;Lt8RzBEINn#bd-tL1vRus=3*c z)x2nsoIWVD<`I5Oh73}ummaAT;HRQ1tp;m8+_V!&8xLj4Ao+n=X6-5`9ai(7zytqJhx+)uZq{W z`>;ePvz|*+?0F~XITh&Unz zmvCQ@Cl@<598BV+jwycs{;jI69+Z=FSHIo^4?ye6FM8qYD&KN)nc5f~-)gAGwFsR-uDi%cKwk zn8h&XJGzSi3Y^RZ%sIHw=CaK#+T}{tZOrnt;EX-Ei~=61zNx?lNCJ?A<@qiG1EQ-e#?-ry+S}jw9+A1>ib6k8 zy}}rKf_36>6Ks$r0fRAtIyKo2H*JZH`q`|o!TISb;#L0lIAfe$Ln%q#l4fcDzVYT) zHO#q$yM5}@)yj2%JqC&fvWKPnsP<`i9(hwxSBq|0o3bS!W?%Ah-g%nLo;A#8o-{gt zc*GvFwZ3W16~y3RrrFZp)6-V`vJ#gK6Np-|3TCVc+)06(oZ5?A6^WktFq)Gt%z1cU z8YHlUEtG<29W64Wl(!BFMQRO;<@-*Ve#b9y=^xM}CMNFv{ta3r@;C3^1!c>Hn*w}^ zJeJA?r|EclK7;gBHeQU2>Mv>io30r8=~SUe-Sh831esv}cQ21zw(7&XZy$adW&mf) zvGFJ=zvuHvHi%=av_w@7UPY8tRaa>25V^btZ*e3lSgd*Z<0s@}f z4zIhZr1E^NhQ)_d(^1gar6Bxkuxv`U;h}%;_U8l|`K&X`y~;o)b|m=?2+)w=TLg;$ zKUIQ|&C(-ha$O2uV4662%dI73deh0D=0Klyyo&cnzynkYg|xrqvWpnx%6K_7HIKlHUC2GJy(9li1Y*->-s$=67>% z(sT`rT0K*90kapeqg;=o4fG4ZF>hI$&{}{t>CF#wR&MStK$utfp1DLnV)fjanaY0n z?AzTJ%MQex6gI^H$1ipHY^dx-n(qJmKx8@xb-(P(?cu^(h(>A&kFAHs#>QVfG+w+w z127v2PFYwmA-IW5>Wn%(JPewR*x1;i zbvHn_gcUi0zUf_bg(Vel)8l>$G_&GoBS4llFCpmf`PU23cN;0}7#JB5M9QP4Dp~4q zzkO2YhO0bcQ-U8KkP-Zj%FjIl<92IGCZTzPnGMy997{sAIE(8@jd)(^$1i!&w?sr> z_!&U%c_Al<2BG)~tvTJ<7i9rWn+zdC!jIa^2U8nB0lM0aNdr7YGt}GLn?lUxp024T z{t1A&G*%9d_7~5kFk}cp^}IYgsBl|X=ZI63oIJ=5s@Dh#CkZOwA@}1xDe6m05o;kp z{4OOBm;WZ#BU_$cXwJvO*}X??(Dj)Ye>nScOmD>`WT_Ccccj)H?!{^4?J>wxn?3<`RX7 z<8IQxwsQ3A z@w1~UFX4O23xGTdlHl7=m8$&zbVY)H8hUL_%~0@iazgWYk|yTn_l~!wf`K>!`$k~Z zV2^U@6~8%)HjMJIRzoWsldFBl_0eg2%147~mcMtxf5x`Fx@rnUs8i5(64I5HVe6@? 
z2DS#@G6NZE4GwdxhXVrc|K2c>CmbQQI8{~SlqPWK0do=wdbkwaKnHyB3VS9ZbWTd5 zu;hRSRLCh~5pFXzO^m9e_ZQgHZKM+;TJC%SwdJct@BGqIFo$+2gS0d~P=;y&_u+H6 z_8QRKqZ=9aGqptS?(SIl_+7|=cs0TT-kA~nreZEJ_+@Z1va_?p0+eNh@7?RzyOX&_ z<0*g6z%75%^5T=(q;t*pRt5Co#_~eFlrLZIBNY`8dI@L01~S@#KnR*JE0NaYf7_#~ z9pNR;y+akA;;x?EBQ*S{==0J9xbkImbQ#lRe^<+3f8fUtJ~&rjA;%2hFjMgg0SUM^ zRcY(}Fo;L4t>G<2c7SSuS&IjIkqY5VIr}{5+u&a3>tCEVM%k>Zb-{n>;_ldk4=y+n z{ulO83Ef!vyUsm9?hTE}aaaPEzMLmy+_S~oqaOK|c+>le(9u9u*Zj^|+L}f9s6Nvc|KD%}#18k%uN|url_GAYC12v3@3R~ov@i6M zLIOem<+X`e!@qi)dyF((gxa=nf%M$m+`M4*Ot9pabvoa3bkPB*nkhw`VL@OV3TT8r zlW8#VwRLX{zWAq(?FyqgsHzfTEZ=3?EjIu@`lW6Zc);m0Agw7F9gc zXNrtrp->3}_DF{GP+_#4h4yef?b63_Gz%=Fq-*23O8^K)$%(C~Zc=b3jW25WlPWwu ztGST@^%7LvHlbWzlQ#59>1ayv3flWRZ0&x*sQd0=O;jD@?@?nmR?s3zwZD!2B_swH zbb%SDKI~H?y&_iYbus^_t=spK{O$@4(v;D%E7uL{-2tSo<;Y}@IaBkyrazme)OP;Y zdYqWE%w&cZE@aXp4{Pb-v{hO5n|@F^vpuH1)epFExPyWC;BFZDQaz4Hw#{fxRw0VC zMe@Q&o!*uA|MI!^9fdTdl@IUN9C-E4?%jl;n>~y2w+p2$c$D&Uj-LM&9Hwcyw}Xo$ zV}*ZKyu*e>MMddq?ro11CHIHap{_*Lb=`DeENsT8&lFB%RVl>o^2~IkpUw+UkpDyf zdRE%&R;pWAFq82Z;y!==9N@aF=a~;!Zm=d=Mhv3PIX?bsVJX$2bGfo2oA9D zT`n%XIo|_3Nl8h-Lvmm4-dytljd34Hp>X)H&4(UR32M{dA1SFL9)>oJPnG!KHHjHb zuhyCV-og6wtU%#HiscBvz{~*l1BeL}4}1X=x+4ah3uN^Er`gJ(b*LGWrA)2~#tXb= zug;~`nN@|X`${=jcH=-y1r+6kyA$yUi^sq7YkS%@%{N=C^;2jG#bx8um~hT<*zzH|M`x;R$Fh z)GXR!pCnov@AOdG`W24wKi9Q7qN07!e5Ee$+OcMj_s>hi%B8Q?pND1M2U=xuG3V#c zpOL&3g``(Gc-u%#G7pyJLDpN3s%)B4b#qql;6WWMXAQ^pVy|a@X|8f#44lQIVoP z36>mms-o{ltiTzbbISzm=rkXgDgZM^itsxzsQ*C}cOOvgaDBm@}YJP|6Um z?q302h=Q)}hyGmgXD@DhTFLc+b~5F?=JPs@*XwC7D)uUI?-{U@m%x}iW)MmLLUk@K zuIfnwWgvNG>hm!qDrWE--SgP3Rb@UuhoJ$1i)`JiU>dd z&mr1An)UVdKMEBgql&{=wLiRArnh|CZgU54J1O0!Eg532EZ4`d=Iqka(y_18)4Iim zjZ~l%Ri)7|(y{lU>qA&e)R=Cxq^zieoK!eHaG>kC$$KlwMmb=kPJ!MOF*&INX#w!O z-2ci4#LQwej|)+CSe|cmeSLkfvUr$)*DbbLwH*GNnyDoy&-_jbMDiU)7$npyKKr(n zwWI65tqta_TT97C{;j||un#Umg{T8t;5p|fBJ3~|@#YQTv$XjrBn0@iXZSA>W`DIB z8;5eileDmx>(UoX&=q`}QT2NVRB2!cvHE8L>u1U7fj9sJO6jbe98}POqMG4eU7R2> za=!)EK0mXrv}G6Y9=O6h0$wG#U)7kg>H5rEj zp}eP@@xbXjR{_vwzIx)GKX$HXXjSj-?6G^eC=N(0NOF&o>s2;_pOca`09SPRdPPXSrUpWYvpat|SZ7Zhm%tLY1O&JwwbkgPm)w9zyQ* zH`PDp);6B{h%@Q-Tiju|WmR6J4OltBq3x*8Z$#-Wa=!1hu!_KO0Xlp*Q zb^Me{MeakD_=V?(b^L8E7T}J<&?g$oXjqa>^s>?lxb)MdKRgs#b?UVh@;dot>t(l# zap`-MWbo{(+m~Z9G^ERv?gP=lYX&R|k}*GdC;7{jD6|i>#%* z)V0@2x3^irb4>*585+ikxvm02o&n_IdXH`XdPI#qoY(i%35ZP${QSg#vX>O3I{2h1 z1Zv)?DTgD0$n8}Eyv`bL=M)lpbeN?9mCCC2Vr1FZz1^j=+Lt4fD}E@Whz77)`e-{* zq#U4=P}SFu$&m0gJ^Q^P8gT7fbOflF3Wr&7qzUHcYB%65Py++5HjTC&GF3Q@?6$Wz z7s6IN6&9CnL8+1GRgmKHVZ=?1Pespde_+DG*cR|4LGH-J?|oo}MRK-R2H;pSs<08|dKb z&zx!=TDS}}uBb~=&_|@AT{3ujZtn33HZt)fDoVJ0v~yN65PJ^VJL4D`?qtt3NwM=3 zVC?}tYJI&#^4xtr+7}}R)f(*Ed%G_T4L_Q7tA8ID2-9E}0%ZJ?Cr@~xq40Nr>1X2R z9@ql3BfnDS16ri3PU>VPm{!7LvM-hV{SyE4X2|mf@e+}1u@aGUA&&hQvssVF>joS85NrJi9qOzSg7AKNiFfiG<Zqyf)KjF9(Fo zqp|a=q|yBSj~c! 
z|3@dBS^)PoHS?GhG&MC_;qdzYLEnRZ5wC?%a;LQ+M!=EP)Du|sk)SNw;}OfWeD=Sl zn7BBe7yT8b_Oh&brC!}IoSZ>%FPAT5NvZ!@@S{IHxnEQFm;R3lP` z5!b)0Iov<#c*C2!z&<$< z>>rA?>0IE5Lt3;HT?xJDopshWlAamUmUfT>-tuPBf<3+L2UtdIMRErxbrgqw{$=kc z7SaxKA%U!*x4f-MRT^~FxOKuGQVD}q$~?MvyDje2r`AM&myTkh{uU&wP9``ehsuwT zTb=QjC6_MQ=kG=Nj}0G5HPt(jKnT}4y6532UQbepWw4!_kDlyxg3=|>WQ7mZr{bgI zqbH072qw}3^zMeiRxt2&yc^!?k z<>s6Rj#k}f@a{H*W`_w@ccJEK>-bm>1xCxN-I2Vrv%Sv!$TWn2nCTFuR_&9VRskQx zh_#e&JqL5mNJIsI$Is6|HVNQ?!CNPg@N;eI(>7#g7M>>ZzMUD12y_fZ2CgOULTy51+@6=< zRgHV#Pv`V*SORb8qyt&4k+Rf#n8sZOVV&2L#u#)o#Hb~43yR(uj2Lg#KFhiXk1q?1 z;pu{2yW>ZLA|$ZQAm|kC7f3BAehDCWu8=*!BR0(Reds&40-DV9rq^jgRB1I_Fk*@0 z#pNFf5wWot5V`d`R+KGBr7>Ek(LIK-EC)dtID(@Y#urc`6WFBp;agKne+Sco47Yu1{L=fA?`VrKd?_CIjFSb1V5asPi}CBk z#KbHgi@Sk&nII?2dqIH@u0O6V?M6;H%NWNjOqYbgTVYgSs^R@aHEkeeAfSnBwWeC0 z*QE_*SxmlDA6hBW2e$hGp9&SQ5@e1tghK%CG+s(N&9yuf6co(Lsv~-75$kndY;M?R zHclOfY4|11KkKID2*El@%6^_D;*m%8fi~(Z*s5#sZ!l+tXgLcm%PPC5W8J*7b1vA} zc%3pE4NR;0a(YU;avQceHSk-jv~MdO5_s}lUrsYxUmSwYR{t@`r6BOx)fIBK=GBh9 z1FPaUjTRD51!EmAxH~HCYaQ2B`|M%9O8a^@d9P#`m^(>ksKeRDd-T-pW1jHkvKxxS zbnPuza?EZn`ut~$pWgE^_sRZxbpK`z?eG0!Nag8| zq>xd`%(=JzB~x5C`vvF$kZ!G#ZV+x}xPUtItWpU7YsUKfO?IunMrq{Gii&hn8=&bH z78XoWxVg9XR?WI$}dsHxOm69F!j1ax` zHg+wTdP>jw`$O{W+W)tUi2E_e%T7>l!1aMQ2buqYOI}p>2Pg0fcSjI0F(mg^nExP7 zumDDO5Otx+$>d1R9td&WPd;RG?E>8uX;7`_NK{0Yk~k831waKSap{GDi3s3JG6U{v z(0DeReA9u$-@#Jg)DSXm{fP2%UL-mM+;*CXqcCUXE2seZLUuTvDbnFyR9b5J#=QBFEA2K0cEEDqwnvhHD3Fs?U%s)yt=g1-gLT%qou9g ztyQZ3Lm`%GMRb^~Y9%8rtrx;ut-CGyO<1)+!w!(IoFXF0uc~#A0n^HttNsIU(2r&j zNe!Jdh#0=4w{Kg4A)nGL#-P^eHDCeDkyd6H>>0X#qGBhI#iu%>$?*=rD}2`~(Or4S zvFVf)2=92t(`Zgyi{u1^0W05LrI>3bR7J0+q5KvDFK9wQqqg}0_Zatd5vk- zzdiDf2YPx;0V}x${IW1_XuZ55a`FMluY7!bVcx)241jI}T1oM*vVbcNR5Ua`po!B+ z`HT*W(=Gyz6A9{+IPPE{ZcYrontlMVH?UXf)VZ-aAR5-KX)A#xjtN}P+1XiT4AI}1 zM(7Ha28JY{F3H;2akjO!Iqf!{OgA(d+?_AfUL9LeJ)I(>I7+HF))Y)p1Zyb338WjGppO%#3TPY~LBK)+?9&cDM6$tuws7V@W}IR*H#awRrbiS9_&CP``@t$b zdG<^|X##*R(2=W590xH#2nUU)V)!`qYLNCgB>c*zOPPe%bL}QYlr-w)3|r%F+l(C7 z$i%(8)0ZX^FLgL1r6}u1*}x%uL16N~D0Z)7#VIQl*Pr zsG_1GGbE9uSL>AB`t9@Qkh9a%a-QbPcVlB?6~LWiDEPde(iVO(TJVCCgCiOQtOqcJ z9nS|7e2pZzFRnY$Lh@X)6~uXGVNsFtYBcc1{3dgSl9Q8H&enTYZfw}(4O>sXM!SW) z?Zs-Eqa6alP2vO|U=lblQ^ALuQ9c~yKm$u(=P zN}y?m$r!0k?>T~#6T{c#f4+WS*ti9=#slTD2LC4yf zRS)Rh9GskPRorGV6(C9QLzK9a1wv&_PGP`?*p=e-HyhN08_&8=81zj{Bzr@lm8Yj( z$C)>m-Q_b$+sdCpiO^-7+L&FCIcxv|a2NxYifRipR?s~9|NVzr0p~^e%LRe}r>d~o z{QUffPo6}WD8&Ff97s}W1mj-;YcmsYb0s{>Vq(NPPb_Tm;~g^~UV<1}z+6OYYio+& zD`H{0Nnks)|GJ}XPx^n0m~(j9_-RKL4h|ONEZV>U-_p`BeO62 ztTO$=z{9nSjF12mxtx!WI6pr>uVo(vxFZ-+D@_C{#9TaZ)R6(NMACD7Mr^P<9?TCO z&Bvw)XOHt&(hop!&(p_Kvk~V7zL}BIRls7S9TD-sdI@Mm1E(sj z%%-beiMp=h#?gocfoFdB1yH`g_85g!yTM!+QvWIkb*JxxP_Ox6eiG8-bh@_?He9Z& z4kR|LOK%#Z0#(iLYxC6k-ZU!rKX86Kn<53vUyQvV?)4 z8{9hPHmK`BmoWyCEXx5HiR%aYDk85N1lJUBk(u)Eg02l0(%aW(4y^TdVE2nlO9#pu z3NHI@(fb;w04`*X4`K(jzc6pT8aos0(44UcSC^NQVC~^+wV+K_y&?hLX!z_Z&}{hZrv>E|6x!f$YS&)_kIc-NfP#U$3F1D1{X2jp zV3;gy6zML{Q%W`UxB!odVTKE2PW}SH3lK1R#ovK8cZ-04pt2X~g#tP<@<3QlQ1Y8c zEc^mADx?MYwUt-LMn|g@>Aie>asctGd|y@)WRWcuOG}pX<851yR1Nk{AY_7yB7;lG zj}GFQ4-DBk?$=21@DP9s*U|DCtpW_WrHi>@X8QiV4e>)%-xd)ONf;EuxXOF~ni>ZDWkl;iEo?{e?%lg-;OmeB!(Ta{J}F;6l9$(+0nDpsqnKp-p_&M7KkOq~`X2{{)Rez3{W;8b|Nl0d&t^FfA1KMdp$YIb(uG9V))Y(bVWV9Syk9~v_L>TnQl z35+FnU^IYJR5ay>IX`f-NgspyNi|+lR%S&Yxd+)C{&WkirN(ZmjlygToC6CGseSG3 z!O9;cQ~j>be&Gh12>QDcqn*6{O=zkZwQujjixU! 
SfN>f8dHP62zEsvY=>Gue>X=sm literal 0 HcmV?d00001 diff --git a/docs/images/pwl_calibration_price.png b/docs/images/pwl_calibration_price.png new file mode 100644 index 0000000000000000000000000000000000000000..1de26c28118aec610a66dc60055eebd00f102749 GIT binary patch literal 19095 zcmaL91z1$=_caVE76Q`Jf^n80s_({B_-W8pp>+9cS&~+@!o^a z^ZUp5y)W0rC|t~(bJyN$ueJ6GQk0j(cu4#Z1qB5|T1reA1qD?Hd=uQi3x2~m%tj0T zx#KJqR3$fTQG>*&)|h|2V~UWXCKtV{1Ag3l%rW^#AG`&M* zjV&rDsAyEUBX)qcmn8o8#jjEzRnVifWAr0db%A(hWAW1zw!l^9gugF2i_Cf~QsClW zU1|vmEpBi8_qU=!lT4eA9RJ6!h#aOrSkzGWd%YAi|K}y`_L75ofmUm=VKca6lkKUR zo*3BKa_rhngMgu-q2TG9?=hV}4j3sTIYxC!Nr~V=Hz)G9Ju#EDP$nkzLKDRCmWjD} zRD68LK$gt(!dLoWMSc5;^*W)9IK&9Wd|Q>z0{lpFEJf-2hh%4n44EfxKXQD~B%uW5 zIAItBdL`GU+Vej_RI{(#y2Bkekce6>! z4D%Vjgx+{m$heJ}my!B#U&o5gLCr;y4*xy%%4ulw`uh52yUhl4|B!z?X}JDm%0pfv zqyLI%=HhxN!P!Eiy`VhRFTuJO_p;c8{Y=;7gkhmYT1pv@alDlRTAc>X8j zxHC@3ljmgC$0w@YPQ#IEu2WnU&+mIAF7j%lhDC=v ztZ0OPoY-vJ<+$UWHTx5KCZ;m`NpCC+4BL|uaVz|H8TO9R5j##Y<36>YDEeYltV_$+ zHBccK1mZ9AozV4{AOvX0mGhGyVqvDC@hdOqGV6{*eW9gA{QUXzP^~K4mzn5A^bd)| z-;naD{P^JvU7F1J96b%CZ~PdK5wfd$rZgSaS{ z{^LGPF8x0!sG}wvh8*$ii9K;WW>!{Ky9Wo&eSJ!cSBM>RusM-Cf?Viru~BOm zaM5a{l^nr6d)q&p@l>BY*dJB4EwA(+y>fX! zGyn3F+#~fZQm=+EHp%~7mwyVMNo{w^xo^Bm!nuQ})yAe$NOr_bZqhuJY8oV!l<|rPFN^+{~0TD zPKU9)Y_Nfm@*Nbjke_9f@{YT*{3t%t$*=lY-&2eGS6`b@<48wYi0N0j&B9>?2`T*} z!_S^ReZi7NpoHhAB0jv?$Ff{EXxj7b;UTJ)f2+TfZffD9kPT4_gUKriwV?#}&3xP> zh6>E5K?8`g_2a9L0oB)({%k{z){lD#Tbv1c2q(6^_Lrpnu_(3=xl_C6^MVSnlIg}% zl)44(^(rY^QjaNw$fM&!IU~-doV^6;(J4`k@oTi54w! zRW1}l_>FeH$azPd&VIBL*qTkX3I%Nc%$UUD0f zbkw#Zj8(8}wqE0x$ILhUJWZW1sXw~pN2yGrrz+8Acgqy>SURAI?EUnpno;GqESqn3 z+sSd9uIa7W&_Zr8xtcf@b_;QqdenwZhWrz?zlJ$yqgO+cdyUUBry_|z z$qzhu55YL_Z>@ECRN-~|qMSMLi_$kgM{;O`L#@Kb)U&R*6 zevuZw9*Sr2axV48oS=!qsrjZ($VSX>{-Qi(R+pj=z0MhBELFP=aj%bahj>(L4kxGu z3eiO$RjJ8r7|;4LtR+G>G1vE*NBHyp$DK@X>?t9>3HYUw7P){z1?;C&|338J`|F;s zMN?RgDO9(xi? 
z0p*q#ngQ96gK_D}K?O_Npw;Q(#~E-YZz-CqX;iXAEij&7)PI(O2yo2hHLd6eZ#H7> z4ExbHJXvf%LWNv$@ngSwcU1J&Fgij67A2mnS#KDR6T~*KcB^orhB{;XXC>~@SzqG0 zWaWl?N-wO{4{fjgK|_h@i?9{-{RtoWDl0q1|22{Vwfyy|B!rVpjfU~9ryMP*^IMzo zUn`3^{ac~)Y4bu`laulU2jXwvckP_`JO8)6Opt$f>fJ<$l%23%s(f{cPZf^{bJ=`> zI9oPWu#r>a`P9K|qd?aWPSc+H4Dx=0b-aHL+RZ6Ym+Ork-EY?1pq?c*cDmcDH_fnNdL`t9u@)Gbd9v*t^;Q zvQ2-nTdWw@Lb_UA*W2A~nt(&glbdlb63^>)4CC3B;VYP1KLYPgPhB+%)qu<} zo|DAJU-#i(VB}nF_cFBTfwnip&zOOF$jA$u*cVk~ldlnMe&dAznHhVT7x-}V<1y(cb^ph8a6a*I~r{)d}CizJ4yt}kgbK4OTh-iB{X;r%ascAS1%DzPA8QZY=F&?-jk0#D=*{xcVgp}9niPGw}8c@h|wLNeo-WY z6*U#aHk$+Xj(&Ksu|YX`HP?lkIgfeQy4T1!5nQKTjxqrWC~p@k7%Jffc-Y$$Ki#f> zutoVI+jK2cSZo~(o@t|7uy&8Ss@~@#Kw=RvC{5&DyTi@9I`VUFKjl(v|1+aI z36nJs{ET>rwYOSBf>XnFPFQy9;b(orrD&lVVG0-VoW(Be9+00u^XpX26&5|f(5{M# zq`JSWlFcZ7#yG;paR>Hd@hH`*=iv?y=c6&g$5~+{RLI*y22E|+X)=5bWz(g`al)-h zt8chswbEVI$$eC6iu02j=|QUPJYvX1g*-njI&Qm19Y)=;)suAz|0D&qdBp`#FcJV0GHbke@d5>at?#;Z zF_PpSEtXF&4+j-IR+HVv%FMA^TU$So@<;MJued8$NDuU?I~0B=D8_Uqi+Zwc`fSz^ zpx+2Y%M?pHziGDd2+PTpjO~YurG@5o&g1j;a@z-oT_SZgwu$yHULZy#Rie6*5#j+H z0Gq)Dbp3C69my~>l58d7BDmfl(S6Fdf)l^0-rU0833J_0gvOS$Q+8RqR`p%q_oW=| zrj=cto%aD!Rz0p+O<4r6Fi9AekdQ!(1d1OY;O6AzSycrToXy6Z*b3`e%%-_3!`&5lHD2L|%^k-}x|F)`NUDjfy0}yI3Yow_4$CXyw(UB}9tpGmL zvLaYrmx8J^VmcB^;Kw$IUT?EP8nK~)2SN-ewbX9EZAe)g%q}zOz+q%!8n1I>BjL4k znLx}XSQpL@Du~dqv0;U@{!HMuW+x7%rlIk8U4@-UTyEEXfgj1UVE>$$h~Bi&8X_$x zx15(>m~K5zm=RERv0SdLe~cl`S^Evl28h>H7!D zeCr0C!JS7v14mX&S$#BIRj%#G$p~k#o%E4`u953WdU5BkUKN@dW#`wDImY_P%0V!I ziLo6$?-ye(>eJr}T^}>=vylWzzDtd<^iAKKOeODar!okUq_SiKu&IR!upf-@b>rbZBWU zyuDR5YX&N9JACED5mh78;@I@+(_VynrHC(&cxk8p}Hlc%7gYg6!zXh8?WuGY={bAv)I z-I`QQ5xD=|y7J2N2=r*up8Uh|1Qm!f8G;c^5}|W7Tm9}W`-UR|5U)7Ay^qL93LB2K zFv%cGYLxovh7iN`cT$veO2;(mhp4g^>;^5dS~WO9bPf!2DS{r65zi!fCU$J93yT)l zM?d2Fy)nUibNlhvLH6ez0=UC2y+C=7mMV;b^F zeEk;JRkPkQpj?^;BPdieY985&6V`ozaoB4mMz$rfMMmBos5c_*gJ;m(cP8KE5gDe| z4iB_|&$Yfo>^5T^j2VZQxb5hn$fpYTAa)x2(#1pEmm(yQ^HI2Dnipyo08i)jm^X|M z4U0o0#IN(^%0(_V7U9tIV-yRlv$yrpJjmznIYm8tw23v9wZ`*zDx?C+0 zWo!WVCHdW0{}q@1?t;!kLROt76Ao-b!gfIAY$E!S_--+P8klXl{#=UhUWt#Tq~R)s1Niw#8Y54%qx-C7QGQ|6z6{!HdH$4un4 zw=4JA7%31gtIo*(;{pjd3S%{Q_8r;EnKAr`=CKN%OKx#TJSB@77%M{l$iy}IxtKI$Y4 z(ViG#%r3w6ekvzfWUT*!OJA{lJnPPK>h*;=t!x~U-2kcXjzN1E*#&?EUI#tA$TRrO ziDR5*I8&c^vS@Jb2;fe!F3*=6NuZXoBY)hH9eekMmlGcVmMOZ^RT-ltYyi6FEN&{J zpG;U6nS#AbO-s9fwwhXI*nIc=aIF~*LsW0qZcMQ*V-Q!`Nz%H|2)2027`X8e%UeaI zs7s&w1ry8wSP|3}KBuChat8MCu*)7Xloow9iMi5Vfov|G<~NNR)e)+!>_hX72OHd{ zOAf+)(AYC)>GljjDe36Qiiv%^JXtA@#zobZZ<}p|qaji^L%xh^owPr>^<;o^h{>+k zVP#{Zy!CAR$MfTNb!=5%K%KkL?5nz0m(;7=J|l*lo0^UrKI-gXH7G2MmSf<|f=OKI zw*{9dO>^?HDG!-Fd+B!{-g4pH;%*7^4p?izz2_XlpI|jvC|>8z4vyqG7Bd|pTde9y&ew4QTrqZU z9ks-8q-D5>9Kd>@Yar&nWoJ9Lles-6A=!BTT%o@hDnKPHdv))GieVZfv*nL;Uw(^; z5{UQ0SfQVzCZr&kEXT@#Q(6q7ODSf{e+I!oj0}U~dV1GT(`VLt+O_g3gp>Hu$>oV- zpRCp3`1%;h-;*%KvKc_(?XYprGsNFnXS<6btGw;bi_Do+Lr2~E+?6lAJmG%=PgxHi zqTl)2X)! 
z$&4rJ>E({k>>uv_w+v%^>S|pItDUGI?l1_)$^ZE-EA?>olA>2d)ikrZvjyZuyU0<= z0aOr-%}h;mx8|SQIP2J1vX|Ou=n$r3pw1{;#q{!p=RlPc>1VF4NKrrzTWnjpKU}*3 zZG#+h-K-cZ+SK-RoiRr|aTbo4SVq4=DZPzAzd<@N1-a&kW@GTQ;L}K`h%Ry#WGz_` z&N4NuY563Z#Dkg}V^KGg$(#6uiJ5t6CDFdkfcD5;A~Gs5hQRYopcw7`dza!|t#@z!azY84dBZ%;<`RcNUC?H%Lr1 zL5{&@x;ENv#v>qCCHodTbY1l>2Jy#!3p^`XmshM`hIgCC&&PATE)6x!dtuN)1m|;x z4=k|*BB`Xk#-ATXO}MN;j**2Q;RqMl!=&o^unL$bAoTCtx#IvBOCFfU|1Mb)m=gh6 z&QdicYGFYi5fQ1B1EBw^vnt54c zY7(njw_J^}wh7&QfZSG@czEJJ`ua++H&<6jSAI1b{I~_aesy&jgS0QW|La+1b~Z1g zy1IHh2Jci8z^T;^^S8`!Tlr^8(@!P$Ec#?VEx&dIJ1Km;g3)H&aq24C_u-%^_DgF92)zhzVlHvL00PTyf zot>|LHaGJ-E#2?v=y(U^o&!}`snhc7fKs1p#H=Q$eLRmhE%&NDPqt|o7&`Qx6NI#; zrQ1v%@FacxTDrHpMBhNMGc!J$R=+is=52d^Y@-bdq4ToXQzjya8q~1;-dnBc~5g?%24^5~(KX<|e#%FmFu{i8 zrVaVoW-Q)Kf-}8OqPw(z{etT0f)FJ!kGY%t;Mn@RNi{}B#{1~bo8I^Y1QJMPpcE{n z;c9QP*U_-LY!Y7t*n82F_KnGl?coCD-+w^tcaXBd#Ra{u$QiyosAaAt+Eo*~DEN4) zY!SB>EZavGx@+d)$HFEJt9j(i<7T$YcKk}+uvc#l?z?#Wh%~dz3)6N^7L?Q8LAOti zDtMXM2K#=@Uo$vv>@$FI_qM5N7*lCXbTl^cggex4ZqA^v@#?Kz?HVoEgc<-0fzppu z!1a4is))S?^Lt3Um+V*h=6XZ8&~j!sOp|CPAN_%D0n-bBJuWt?5wsN%(C@;?lSH0S z6ScHtB1?-)r;t*0b1i&xk4@cZ>ks<3(?*p**aHh3<1?ef&B_l0o1`RB(!)K(leh-P_{e#zuB z5UMAhw#f;}nvms(BRP&T$yA*0dJPHd>sIL*8S&iK1oriNcR|^r7Qk%3=tsXCR2JZWax4z!bm05M`+c&$TB+rqoAD z&OyY~`$Ir`3t)fa8=0^Qii%^JnFOv|=nTpL#7Op}`Uee;T&&R(yV*>h(y(Drn^@Ex zbVSi<&$4U+6~p1|b`rEXQflZ0#n;6cCaN<&7OZy)1N%>+AJgh=NDuH^a?G)66gL^x z6&7M<^jqxYTa;+*o}AFJvn%GjV=^L_<{EN^Kcflm^XYB3P~y{5Z0#dttSOb8T-Ese zcgX@)mFd~os;QA9iWKrZR#Z`WB`1eG@AIJcvvL)_6C{QK=t<68{ma{Z3RtTi z{Ss7;WU=H4EIQSnNA)~Il`N^o;CtlvV3Bp|;-z;~v zw{qf+d~C#y4(r=*{3$=HnCcMte5C9!YO@+#! zy0rvi3m%UdCkc6uz;`ys%PzLv8W#Z|dm|^0VS_6d2*}7e{#Byf=f|^dW_VHhN~7Nv ze?inJZbDz|t!MECjgV?<{IpqrMW6oi$)DF1FX_mQ&(ieBe4y5R);8KFPt z9EyK;&8`vV37B@n)?j?Wi`{0-TVNSYhV$M4z)RB4S`TJNbM+RTGbwM?EYZL3Xu52? 
zMu^P3-C*$sML2}HiqquR<8155GmiA5dw?1j+#oe|5bhOvy>ou*rrEv@cDj2Bg`K=S zrte)0tMb{|+0=EblcIFSi0a_7pk-A5R_>HBJ*qt(fHPWJ#Jn%|+9eRYwi7L2zNT5T zXwwb#H*fvx1%OpxBd=hSs3Ck_8nZo*9?hySO~0W5LncR8bZroW8n2Fua`W;;mGF+Y zCffnM;SOqFcM#B#^&XX;OaE^9K}+U+T3T^MO>?tjNy!`zEj|^ku9>8ePiwd{3yA;B zKiEJs^&WAuv{$F?BFb;xnCj15RE+8&xf;7zWnAn7sq}dkbJ)AE$$Jue{aLvrqZFOS zE<1%R$M~^-PYI-#m0~X~fSihsF5u$Ai&dw(2~aXX@rF z%2NM|YL665!UJ4dhrI=H6sl*>g2F0USJc(i+CU~BDpNu2cajF-(k;2}E`k&KA`|9$ znHH06_vN1`TIUBqKuB$E8?NuzN-O?@zbgn^6}Tt4m|aon6u$Jv>C{WSY`I};lHi7c zt>69q-LUUta{v}DwvveIfDKO>bVKd!@#dNuWd$}%EHzlWS^kN~O_`bZfe3fY05*YE zxEV2I;$e@dywQfy*t)aWZN}IUozcB3iuW-wR{+K7R`*40K+o)(MA?=JHUS!D3d5?H zA$~^$<;+N=i&FwdwWg*feXuAXLXicqlG2fk>b*O69+Q!U%4e(S4kQPjR!DGp{ire5 zcx-&`Vygb_XuSFSl?QYDP`jn3{;n-cI#N9ZQ8MQ9XOzGi6djd7S5`1FJAAWI6Vv1l zCuaK8Rhj;$yI&=iy;`fd16P290#sxBho5Mpf_56OQ>Lb-B(Q#qX;r9~6V!lhF3=k^ z{eh@IAAL5N^%d#}MXz!H*vJ3*X5}U!?+d=FnYZ~ut$+w+kCzo?jbdHXnev3X!3Au$ zLh7CY|3iRFwx`9{*aGTg{8;j4p{vuyM`VyM0|UyUqN04Rn@^C|2M`^Nn*A}X1YT3y zu7D7dEA+iqmwiOU@sUu^{bjZ3K#v)x zS=sm`F)KJ%rzr>D^VbOzr<~FQ(;3@qoM1hp`2SGrt_*XG!J<-u6*6+Fs$XD7Gu736 z92^|TLjXyZ&+&=`qxS4oZ}%S%>3n-l-19J1zcRD7=&bgG#b4NTlbbT_G-O`9m&IM&QKY%N7pIE?36XW83 zf&VkGveF6)>VVPO@)P;^jK&)5A&WHykS+!6sLPaH7ZGgV890|h1ER;Or@G~=I6%}b|X-6$@ zaH{RF%G3Xj!WHwkWsSANBcJQuiHg0bW{*z_zcU7{yauWmylTRdDgV+AO${4`brY8_ z|6huPjVfTEzW@?H3`_X*9g?W?0P3Wuu5PkWs!fT;rpjx`WF?vd@Zgwu(Cb& ztFMT+9j%GV$;l-aMP>JO*zjb~KLnnhylKyEK|1YSV6Zq?zs?>>^6jaqtgL)9n#&JP z0oZHMERC`xKF3?h@{&$Yr7(S9!l`rHan-sC_qli$F1UYxy3Wo17%4gY$Bg3|;w%t& zSX?ykM}Vg9hIu^dIN5Z6guK)h19748+!*1qSNdjScljLZe}m+@%KT6_$4Y!NB*Sm9 zJIpny6_~62wc|9yi;LMiyShlCh1_?af&ylFSY3Y^_?CLec^Mglb?aPfHVvB-^=(MQwT^22M2v3xFjU#O*PJ|PXyiBNqB6YK7U@)YW(EU-Of%@bww5?re|DS zLx!oXM)UoW6v$g??JIiYgQX){?!gb-RnL=N-M*|o3YHSC;fn7$Z$A4ZkC&aFaUVMM z_%E7y%g~)FlFF)E`#C;d+Rl!(rl!Wk%xs{-TK#I(=c+vm>-GPTCdwtSx;r~N`M}UQ z&3iM!@O7MfxZgo%?z%B+tZnGK-FRu*JC>j=zDOyZdBBM2SS>}m z!ap3=uf(dxN=^8J;FcAuw@}hfZhU=pdCX6BvNPMa7%tK)!`860A0RF!w!0jwudJzw zoLxZFuo+w&Pr%xVkIF6hd1a$xO)I zJs^3k(q?E_SmeBls{QuO9{}WWM8f#_&z=;ay8dQ%eTniS9uNuLd)=_YjLgj#9_~)B zameGp>90z(XUk>PA_IAme4F6-e4_|mQ<{Zxf*)AT7WmuU066~0y4h30LpqYZymxqK zRVEMdFnzcLWP$m2fX2|N@`)2$$nSnaYKm1oYo@pKK8?G@k6TY!m&y_gaCQp|MpRT( zn~l5y1xfb<<0Nq~`jc1)(1YgRKMFx#o{M+Fn_Dawykh^h7r74q5C~-^w&LR@a>z(e z=kvJ|?CM;c`u&-#``HM;eS8Wa&PV%-%1&y~t5Ir46E05M*mjB@OYzx$C zr<(8o@-vJ~V3TDuS-`FK(zn6Sf=~}>-x^;L$f~Dcpt+qUdF~iS? 
zoXKS5O;2CfrlnYsG4`$&CU6|cT5uDaeQVntwRgTzs({Z4dy{P>uMB6lIjaVb*lGSJ z8Uio0k|N@!V+Z~sIL}0tMMFwjvxg%Z+-7GnwKwMe%G!F zBh>eb0BAw@>eRS`Y&uviA8J{`tN-$5^bE5kQFYklHa)fYYD8zW87+fWas%MZ$0{|C zXJy#V)W;*~5EA=(>}2`&-)XPT09o1B8O`JYX!L1D>WI^a@4dCQNmXY%FF{dSCcc** zA%-OK3EA|hDSto;1(}&YrBhi8TQB{J2vxJ$h1$pvhunBZoSosk`q)8~P3Og%I+`rA zX1a%&T2U^UkmEN)4gl={v>)5uEsFxm`G+G#`r+yHUI4R*V!TD!Lsl|ip`GF9C}eJ# zLu^~+SHICx`KrQH+u^~Zw*YD9%0SQe;=~6EV~vq{COvgLZlb)L2IK$6u$Sc zT=PGREm*re*k4>hogQ<@M0ppEscRrs#475#66+qz6RQ3xJlw7JD(?Ob>J@W#Z9OUc z@!Shq{y0N?Uc(#Ta+Rr?k!R?(YwwL9l`}tOuo-*`FVL7AffQidX?Q&=sE!4f;G2>?O6WqoQ~G~ zSyQUTU7nDI)Mmc=mq}RCeq3E$bp}KlAV5xg?7f=-#fgH1==o{|pm+pzsHBmRQ3B-S z`sQR+-}drD;LJer@p(Ryi-SDbb|E_pazqy{g;Q`c+g)>DHKC>TFk3P;&oQ|EAEjpEV0X7JzlM4b* z8OJE+R@9$(n1(~`EEMic_bRr}NMNiI+!LmzrY^P|q<#JRHE>4cH%KX;oP zhxcPKV=;*5Gy_%iV&j@=tGE7Fm#0%n&JzR3D>6?K?xaaBZ4GVB{~I`_SAX-S74T0# z1F@gc)6>sYPTDON`CKxAlWbB)<^m~+sRc$Ouv+;$pyo&1<`7rXXxMo~5>9Zdc+2ED zh(+Eo-6Y0AK3AkbWyR;oJf{XoLm<+Hk@A1|VK4_0=_>e8aRDX`29b8eKP- z_oX16$sjX9gbG`Ykp6RT6~J6`{bN?gyhcrm4mkfcg;sS<`) z#oIs@sY;3Jwd_8-4N%N+=AR`yv+;iJ6k@aeC@GoXm*HCy?X5_FEGr*(Wd?< z&uq={rr5NLfc)(r%B!L9Nsri$Qeh%mC{g`A42C3Q8M7TdVkH8hWtiFrB(sMHVH5~T&jflvBxdAgS*v__JJ zHRqQrE#|M$_-!PPS+L;P{Eozek5=n%hfKr#WWM#|J_R`{uHO?S@cNN%5Py^48jAxr zj2$u3YyFxUE}9x>aOL(o5@yvN!c9wH-#J%LsD>Kj?k`X1Af6bjf=oi=iSQ03$_tOz z`X4U7U*n_ae`22a^3q#ew`?csX8M3FooPay1R3_s?GfKvp19jNQK81Feg2i7v)Iu? zgs0BkAe>N@tc~=Ft30%Oc|!<;0inYIqf%<3OmR`J77kn%f;IfU1H`LL$)_Kjkp_K} zrf4Y$SF@+zUi)4{4W4rcYwmn*50UCnt+@w7&;mhr;Rhc`T&irPH zI5i9nyKyAY-=Y`3EnqI?W+h7?j2N#y-vNHA&8`}$0%^+5xxRYC5+J(lmYN=%z9kcQ zg<%fN#-uk>u75IY!6nEg7^4-1M9g=-wwcH=E&WZ3S$O)GwQ#-mUd85`!7My#9BjZs zFjuqo3g4-wtT6MdY82T(W0JQ1?^8(+*9vV*v(|VOoG1HzDtzQ6tRAUzX|Rk<1_IYAm`seo5eh}*9@3&F(6{6skr9o^(I zP4IZ-%S0DBMEysaGyY-t!}*ka zzLc?-qOQwK08-AFW8R&~(?I=)dG6bZ%s91opL25iJHnc`$|5};_OCxddTSZbf-;{0 zOKqI^;@g9nPw#<}PJM+jfW=Q}?QZCXc_7T+2`ZQbFBCeZaOCOG?v}q98BmkPYdkHU zHWCeoaC54}d6<^Nh$xEX9pLMY=GE7WF2d4X8Kv@Ci1O|7KSlSGtqQ5)4kp=CBiJ>&j=Y&72i#35{@TBBZTK)>U@9hSTaG~ z{m2w9g^YUtoK^rxrgpyfTRfRz`h@IqITJCDrZ1aKHfH^akc^04M1CA1aSb@YMDkwaDwr zSa}$Y)eP+Ai9DS#dZik8CF^v5AN}uCI2kopu^dAS{Ue zF1CYRmZr>bj07%Pvqh_z9 zkNO<^)o#ZY(Pp9=J-YgchM4TKU@dU`-mi0BQ=aWTc`je@&3ooS0q{Z-jcaWWc)kNo zNgTm>WWoZ9+4Fn-d#q^F{{h(^<$iVlu|h0@vU3X9wlcqci;Rq1P_zR^Hh|qCf$a#m zhPuiv2Q_M47=T80m{}-Bg_@oim&V+p--U5Wm6OA%qGVlprEUJLbVSx8xn}y=coNI> zHxxp~PdXMxZCn*O+)VYJyE=8+H2|0Zq=-XK-V2yh;EKL$0ARo&&={4TB3(Lb({;Q) zXI=UzV{n*2nR(yJPe1g~Fu+~vfqUAlp$chTVDIW%291p)tn8H|rDgky*B=K)2#bU! 
z2}014<;~rA{^G@v3w2#xGJ+7lrluFMef^WGCeD}-A3C{DyUN=N-K)6qFYl;k4_2H( zJkFDVm0S@I&Bw=Qcv~oOu<*OG+LnN1L5;BLopr8@mD-d~Thmz`&KwCWszV9L(Q4^R zzDf48D)R?N?Ttjpt9OO<<+|ff-4kKVgBhq-M@?S|id0-(D^Ikm>Ex4MWDkz}vb^|9 z!rH0w2ni(v_rY|Uj-DPlpkQ4#Mtah0l$5Z;h0k*?KlA~B4D6PT(pch39}{ThU7x5^ zMCjPM4|srvOn`!SuHz&Pvm&G#M6ezO8G0Fwn$fxMjaoiH-+u;LH2xS^vp>9l-(d|a zO(Mf^+q_Xx!_>-Us_-QwCHb>Mu3#MNWrQNDZDNlZS8SNMQj^W|jD8J$Nz@P{RIcNF z0`W9M+rkOAu+oXP=428~0!t=<-T)g1Jm&nH=;-Jf_4W0wrIoW?fZs)%%E_U3-I^G- zqOf1t`0s4)#?|lhjRo}8A+3PQl)c7ILZ; zLh=3oYD7j_$RSN0N9)6?>gu>;WIf+C%iI@3d8MSKDS3GbeSf?FBqxf>H(A5}bn#CV zA}Jp~-T_Jo(u;+x@amIpD!hMQMDN96@lsKJ0HRDJFbn}l+?FpnAcS*sa-Iqc&*&Wk z>Q@N(TfRg`?|~u%LU#CDP55-~b`@C{F0ez~^a|d*Y`YO`D1?zH;`xCB#$CG^>7D*h z?=-k&0gyRup9}mGY5q1n0~2{~*4MkU-iU(cKw5QBB|c@$+k!n5`kyV@JgdOm52kwY zkbC|%hzk*cqM%(0*=$|)Z>RU8YS0$FK#9a~)HqoEl>gt}lmupEL>z4Jbo=@9r~B@Y zds_#~ZzCrrv{w336+}gErKP3mz>bD(X6nZ+$3c|9N|q1!VNiTsZdzG>%wadh_u<2b zO-#`DU|;$jv~)I~b3Azj?6=+riL-nx8wC)(3M+h|96n=bFYZ?agjW@(K@$p47N3ZC z3-a^x%aLbupEMG~fu(rWpTzDdm~ybQx8Vr8-c*sEOWb zHVbo$T5eEk|8Gs@mNrq6J$GK|rVU?QzbuNemzI8jC33+IuClbW z^q82qGf|aH6+CcY3i(`sdXIwyp%|u<$b?=VR=s+Q6ZhkB^F%@U#0`x#165U3`m3v} zPO#;SX2F$D7(xiy>{MMxzH4@>CQluH%>uJR{_4=4A6FyUF2C8mg;?Ox%~bfd9i)t*-B#PuZU+` zlOVjjR#g1#1mq-%9orr-fSqNeei8h`z+xY&zC+{YRO76s7ITYUBBP}Bq%u29KnJ}l zH<~8t@<>`ifsUJdI4#N6^nrb3Y;1cFE?ttm?t@*>sR44Xw38DDn4)$QD?uhWojY(o z27dZvGL)kv=e>N~3h@SJvl?)+6Lrs;)z_a8kEZDrW!ulf<#*Obrmj;KK$N!K*Q=cG zY*=~#PHA9Bh%s#kgHT2@gJqm}*qfuZ4Oa1S6Qqch#mXO{(pr4r@{^^&CaX!*h#`XQt1Wfe6 zCj5++)*oE51Rxy=s3p_n2y$|&<~wnCw6rwS4f)*)nESQx#0{@5&VW)|0(hd>QTYK2 zF5%}T&MM?^)pi4n>YfU_TxGbd4TuA0rBsF!FyyO&1n>nsj3BGJ zT7yW$yFL$#FkumCcjYi@xt|5H*|<|8kF7GWn1JUoqyrb8ED(>CVpz0S9*7Ij(9m>( z|9$~}wN?u*MhfrKG_VcQNx|nRY3k=K0)2{+l9KW@@MV-O*S~3($WY2HEL201EX%+- zS~hC$?F|PHdmvmc>l0Q1YHc(i^B92n-eRAxaOT?TEQQ}$LRwZ~c@Ahwc}2u)rxVp~X-($#>m6`QW=?q{wq64Z*8u48G6gbsa5_Cu zA;Z!~H=>i0NIl16_1;PY*SVVLt5?xL6=VdW9{EX6Z?75(4X~>9ZyX(`g7{DzaX@jk zdyfVJ(R%RUffDkVA|EF48YtP4u!DnxXb@Ex?)tubfR3&LwoDpuB*-b@%@!DoD}4d` zPooCd0*SXyq9J;&j9xqeRYXo0d1-Su5hRZ&(Jm6iQ8`24ynhQ%3z+67Dn zY=yP!jFhpDVyo|gLSDE7NE)Cn27KYgsZ+WRLZV{;J z3uSli-W>or^6k-jD~PLIAm*Hbw23-aVND2H&cGRd4ybcrDIKeY<_~`P*?4_vk38xi zhfLQ(7eThG9e3!Y&Zd zuwDgVLgSh)fO`goSs$JRQZc~(as1A-;Bg_1&7O;ki$Os_hTsEi3-1M0pL%G&pmOv> zx|5(ABhMWd!~!nC`WgeiRt|1|w!b_D9n}R3@e{t&$y9V_4#Zrq!+uHdBo3alo; zdRxQKR!C;N4&>B|^tKM;03ZMnuGw%F)I)oJJ0QN6mzR$_2Ze^(IyFF~+a)TI%M1bo z`;$k2rhN`#4M0H6z_PflXa<(AhkP+R)!Pa$)Ar(YG5oKEkxb3{62P_T&EVUh3Gjy8 z-5AWq()YRKURzs3-qFFMAc z-a`scoScJ{MLM9#1tj{!2ATyb?P{DSODrlW_t=u`+lx2m+ComsLDP6s;| z4a`2NH!!!2Z$1YfI

Ei-d>K!#08QE{$#(+zl6v*nYGeto92g-`^I{URvLe02c! zwDU$mRpgg1EnusLq|+nqO!}AW>eO^}oSw%vE4|5GK>u&~O! zyS;v|jYPBF{W++NjbHEJf1AkVE*#uOEK^RgSZC#^rek%z)%1V1?14- zvkSQiy81v`JOs+9@3n^pBLB*Ievui#6q{2uxCJ9`HyRP!b=`@);mC)R0QhHDvrGWL zJk|!IP7quHo0>@2i*IAJsA^{e)Sv+QkQujqnrHJlLq~cNknY?NHn@Yv{1M3LMqn~N ze*Bo@Hh_~VUU>R6qWnFW>Rr%S83X(<>4R&edfJsXM8KV4JXP%oz}mxypsq(gzsJUP z*g(2n9BiOS9(L1CJOXBoCJ{?8Ca7Wz>NO)NlE_|%#aZfdFH4h o-0y=>1D;^<>Yq5caDA`R#IwZ10zVr(L<&V(TwbhD)ZoMa0r5LfivR!s literal 0 HcmV?d00001 diff --git a/docs/images/regularized_fit.png b/docs/images/regularized_fit.png new file mode 100644 index 0000000000000000000000000000000000000000..96f4355f1343b674f730ea64b1e2ed41862ef14d GIT binary patch literal 21423 zcma%j1z45YyYHquMY=%*>Fy8|kOr0RmhSFGN=3Rv1Vmc88>CCRySw|YJyY06o2&w z8T|1=HVy;dqu5DnI6@$3xX`~aBXd5t;6;9?m+DThZB3nA4IE4$)&@>?R<=%7?+htj zOdK5F+1gNYvaqpmGEtg4IoS!Yvi{dEu-H17v8JKrXF(v8kXJ9Bzi~_5O>^~neRlt# z_evnPd}}^p3?CJ*L=4ARN5kQz)@Q0O^r*19#cDNSXfH^^=-+#_v?wldr2bNYF)7Ap z@Qg>Di!CaDm%_7hdyo7oh3=i=NNlX*$l%G*CfvX;{=t*(t-6d8kqmH{_pttAl-Rq` zL&LD}u>M0DlrRb4d-VW#ZSVtq6kIzL@bVMs<|o+5@bQ2Du(e|tmTYZvILpq}wW?Yt ze+(89Bz}KuO*OFFEM?}GF`w42k*#8IdALNk<4$2WH#dhvL1Am}X=IdTAlfLFI^`cT z01<5vOmS3Ijh{NJ$Ex0I$!+@X5ymNaI83yd%WL20D%Tk!O$k1;jL#b*Ax1bM{NJCA zKZj*O`uC&SH9|*`v_A1GG@v`~>e>gfbmCEowK$|7>&9jPUo5vg5)pG z^cHMNo?r_Eh*A2<(Mjm&q`l2B;$AG$s=|Er>J_;DuHC7!nOWEVWNu^>6n{CoHqJpI zqmwM;1XviIFvl*cmUw+za0kaOi1HbNOG?=4uhvue>=q%!#KcVdu8Yku1==;Zp;#1> z>gt3lDk>#bQyk!xk!{ChPtp&)Yto8}q2if-Z=St$aNscJAllnI4^fQK@wnWSqb$M5 zZ+eoKm*;jmEVXoSu(0lv!fV@^%J2B$!v~0-o?iXgxYlI(yGJyb4mG5LuKO=cP3ch4 z&^{(5g{KL+inAn)PAgCz5{Zh6&ek|~Et)rK&_JWyNRzMc zY=b8O^+#mfaQG~#n9iO!MyfcOcixRz^G?0F+y({)zxw+twr2PEcMkH#>knJeF7Iy+ zN=MHyMm) zJ@{H%7j`#3e*BnF(4}C>tT&!naz;z9KAX0x5k=W^X3Ed*y?PZ=+&iVO7`Fk;Hcxvj4W=<|4)b z|Gjzk7<}DsSZYuwlg(kJmOrMH_aPgnE30gGX~{r+T(F{IcWdjN6S}LbYZkb> zQ<`R$#d#HFHI}=Z+(Qj?nM%?-QhG)?b)v6DA?Oqpbm_&hbJm9BgdA)KpUBU~!@2Uo?}o&iMul2Tc#; zkDe-U2~^!bAYqZWe^<`Je)K4?(fh`5?8n;>YXk%YP*HS1cv_UT-};-I>4=_Bd@$=C+LN zX`6NB^0{@pJ)6+`o|osZMBo0AMhZ*^zb{`X+cqGJv5F$kn|$uDDJWus(Fnvdl?bCp z$NBrOde$7Wp8lw8RCH{jh+i~rmY0@p&X!F;?@eIQ5x29mtEv48*_m&R%r{OHHTlD^3yJ3<^9Q0~1qwOCVB|g{5uN9godIW7JAl3=ItpTt`R8 z*8~70&lO+al4h{~7ZCIj8(tN;Vqm+cT#HC0yeNWM{Nu$j4IVHAOrjkBbK-~5!CLo(V_F$^< z*)PL;d3(b^972^FDhkP5X%_rp&k-H>|@DT8klm@+mw9U?3u6Veyh?*)B9H<&Qz{ zLT5Bt#LYny1?2F0r`TwvGit9^A%zD+Hi4zXC`LF0R0^ZJ>oduW8!!V(^;=-u+uI?K zd5;}tbTR?Fo$*4;i`00piDJElqJ)YPV!H+J3mqoC#@Uj3=N(lhtxEJkp%ZkEv+dxW ziDH^(&rrcY=>pG<-QqVrsEYc>tKE_rWr&X9Hc4_J_A8x91)}LXQLV}A30Cr-vAb4v zk*o8X>y0T=25ix+n%WIuMFa#tF>6)oum1YT=e&)#K9Cmi{ypk1V~)=W30aMeK0sD zNY_a~0Bm0}ckkwtAJVj~b9JuDDk_Ffa^WvvMMaxjbYf#;!MO4t%U3hnoh&^(J>{C# z)z*&u^-H?V!PuA<)TExCo@rq-twy;vi)Qbey{e=08QYY(93}e5crXo2>Ti#G$wbV0 zdDpBnv+2EDOWR9!m#BkO3hTNrv{}21yaSe4?e|Ks$TkHWQA7kB{WB#(4X;nP)N7p> zDsARaeSLl1JUo`cC_qL-C5?@Z z^Q)=xZnsBWN5fJS27~FsbTi@-5+5@%Y}Hxl>HQ)i&;kMiriF9)^%@*?b;+4|Ywdn& zB+^Lt#?j=;pokGz@yc%nS3T4Rrnv58j;KsJ7os~NPU)sl@?pxxTxoADJlviPX6ZHg zR6DGpg3Aj8^I!?OHvM>;k4;Dz_Vp|EY_&by!)2G~At)uwdJoqGIdVw>-@nsK*r+LH zhxL~+$(gRf}1l_gze-+>qB`;h?tldBu6350y!O8J!@U|-j^D6 zQG>a`z`$?`rXmc)s57z!6u%>I{nQK$C~!!a@Q}8YqpOpRwz09tM@L8MCHgSrp1bT3 z8+YJ$z&TM6!d_&>h{()bT^_B_Gc$LLDvCydu?eoy@Ye?_h?B2aXADJ5ckEN>BsA)Z z?f?gafk2lyaNiGS9S1Yb2Gc=NW!H-34J|Ham5d}bs2mq|T$hte;y?iF+KY$#yQQTi ze=O14574C~UC=e!MMFK%Z~1rzllWV9b_IWKl8Srr`1X>4Jw=>k#H$pR=Z?j+jh
ji+BuS8j0oGv&B)1*4+J(~e z&0rFt-`qKzvAiL^X(Ii)(#L(d*{9}nv&YND&7E0WOWd2l+6C?qKbaLuKc9DVc3WD{ zdIGx6ZRCQF$mASWV|r~$O}%b#mg^67XBuqHGWQeH_KpsNW}HsHw=9-_7cHOgPQ zymf5eoBk1k26?msM_Hec+vHwF9(VRkl1xjYE6MV3I7v*OBp0jN$suAYPNwZaD;E#a zJky5YEuJjpAP{S|kV$-%h_#l*>EFzwKw&o{QS>HjSvP7-o>nZes&4A;Q&w`_dCn9+ zK>wJ7Bn#MXe#|th&ewi39KIxbJ1`%Iyato0hWlYIybKC635fkY!NTxs%>ma5#YI9i z+}LwPuDo+gFL~?aeNoYR-C8Aj_4;v@M{E5Y&fbpy?6W7yTzD*akZIDU&k={iDSSJNzr!Z9wAH((|4m-VZ_Acs>Aemx*tMo+Y3qK%VEDQBx{Cc}eO~N@n|2m7! zQw9V}b{WRi>0Eb0_gh0G5dpZ{Q@Q~G>E?=0_e;8VkG7%{v_V)+c zT_>2HyG3p-d=2r;tHvj0HYp8FWjn7lXdS65eELQgGU zJ+QIvHYj8IP5Jd!X%hfm927JE=K!)X@PBhQdHE;<9eaCrzzxlj<{C3qkReKtpFKO7 z=5Hjn=Z~n@X0ra{ow;~SYX&2uqfr6Okl(+50}PgSUfLk6JtQo~8&VVxTlASNw(CDH zxDDKlCwQnQIu1MA1!&2%F|w4{n_zHEsAvh~y|FO0RTpN^78Qio9x?q$cDaduI0^=( zKKG)kswy?8^MG;<4-Zc^dV`5Z|GqWFwlVzh&<>!ENK$^>i6R}N-%t4YiRR}!;bJ^J zJ!$y)r+%!QVt#C0o99EVc`Nf~KE)vo$PJnR8R%5oQO(ZIDrsuIx1OmqxWBzJh1*gT zy$iB!yh41ckSb{Gmg zf+TGr7!u&D0cT|c1zx+!C(ZupAN)lHa3sKhWdf7iTee*`jqtE7?)rrqlZ9GUl(^y8 zBqWyV34pzT1te)#(0ZEd79ja*=N%hg6kem-%Olempid0{{%!1*@_o!AYMzCM zhbMV=!J)3kOv=5hzV74zL;~g^1O8HK4a(8THR( zGwnNyx&%JYOJiRvEB)+deLQOgVds;6jW4HfDJNA#*jx@P82<7!YA1y0iXu(^-514 zHMI%MtEylUznd)Y64uweDF=El&o&9RWM?`)r3k0(1)4aQlHOnkHA zZ^|mlqZ!yUD7LgXxGzB%cc?BcEd|j1S5FT|L7meUF8CvN-T=M;km7w<7;<%WwLXA3 z-+%o0DNCNfuy4zraH`m0bL=6XNBTe5x}tePf(Br^Wf00UAterLGPjp2F)p!2usjqdv^LaPrH>V6 z5>+W<^!_b+NQHhkTe%rf7T5va6$KAyd#TNwfMjrD$R4YQxtybA@N0hBMF*mPRt|H7 zS=Po&SO5^Y@9z$L+d8sqw)XvC;0P(+guI*FZxg1KLGcZuh_U55@Xw_m zki`xt0Ci<^uCA)vLrqN$5GJ-~?XF%OMVpTcNctpYVYb}4_q>2+=Y$VFr%0!6`S$9B zZpZiG&h>PAEW)0F&b~dXZ)h|AU9d&Is3LjOmC5bZ>kQMLB!9Unp9(H_KYCF3g4Ds$QCU?rbKDLvJB=oRUt`Xo^rlKq6W?Gbg z1FJw)3|lTdGkiUGm@wp)E1BP-R{IlVLPXNdLa5A$q4S zoy;O(8z2>T4DwA+;s1kaz6ggKo14?mdmac&D$WyqaDRp4Q-$ZMO#=hDVxdB|`4RAf z+P;JcsO}`wFjauE6R_&O(9lSF^*a3b?>C(tslltO#`sm)92|Zc zbpSxM1D-roS^^{(D#!kmr^F^~kXs>+9w6&B=IB%KMka7sc%d9iLhp|~5`;t<+s)+D zIXqAaFh?L1b08Ix?l(Q$*~+Ez;Q&1@q_g>&f(1#-kEDhMtbgwUQ_M(3;?)*R9R+F! z05AcMs5zQUkaT`F5^c?j`Ja+B^kZp#byXj5W5C3XPFK^yadf;(L<%7?75Ft{GPCts zG3n1MH|*CO_+9pA!;_QOk6a=mB0hflG-A4!1i%ueCig60_tc!6qw_2ZQMviZG_^cf z=4GOzT*5{efP+f5;QM9PfP1t^S8h=a6k8#DFj)aL1{dB%o6D}>45|CY4wK*@wX$++ zzZhQ>aZoDw)=h+;G`%P%hRG@QF>3JCnKd_W*;DOjX0?C24dc%l7e&q|jUf6iUx8^QX2wj!#>y0=*@?D#Nj{vna^I?;fCh1$tRnwQno`jHXA9sCP(le#cX%is#CEK zjQcNlY3kyX*!tS3qCE2{7={mpsAVt-(T+gBe;*MMHG(=nKaXi?)0yp%gGP}C;{%12 z<*+tRPFyQb_X{Go_Csw4@Sn@VsL5`B(3$dM@kv*ukfMvF0%!}|fbaSFfiI|^gM+yn zNK)DnPCKuA-<&UHJlsBoib+5mgfw?Xk=SnzQPp*`!%TE8oj$HJG?zQ+EjwU|Pu!K5 z(4dPo0E1jwUcRH+eub3Ssz!Cd>0kV-+B{cjO#;;w$b~&90VH}Bhw8_YS8p-)3_80h zvF-NlO)U&dmL9tH-_SH^zAI0Q7}$fZ|9^&&Umf^Oue-Ae^)gcoZSCYT?C$RFYPTcm zEOEKE4pspku}FWMc?blx87o01s?u?|M)=?P@z}2H=TA1{U)Zr{07QIaruQdTJhd%a zBfqV~eNqwi7kal6V_Xag4qgKK2Hj2!g=nH}bq91i;4Kho9Jg^JTanR}O!`$XI4v-?-yu%!ni4n`&A+}qy_PDGh(Tp+mx)YS<>M<@`RM?|JtgVC1O*F#1|RJ7~eaG=@&a8LkFi9ENo zuxL$WH!87P5{Dv3z^P3wC!H3L6J}Z8yib2_I7^=&DkHm;%a{$ua=_?7{NL5d>mU&? 
zN`rD|XJ=`DKf1G15@1#`%|iSJkF%eXGu@;>fIU1r>j2^$4habwB$3+^AG$JwB|Xc# z>3W;dYU(Efz}Ds$)nJ(mi-`el-on>_d>z!m)$XHl3RehZpBko8PS$UAKI3_jKuX}w znib|>qki9NKtn^r)J(v(DkOi{98?s$t&t~SbwvZpv}y(^1UwjokJ84-l@O?`{}(V$ z0a|uqA|bF{V1TId@?gH<2q=tMK)a&50dx>Z5k&XI?|7 z#-Z^Q00^_$5=V8G%!;-@((#)6s52HOi4mN^nxHSEn50}KE-(?^%Dj96|r=8B|0*`b86y==}Zr_hx|7FhrruVS3<93REP& zJa1U^UQb)quL{{~^{$rE(b0d8i=L%M;KZr_G)4h(<$FIrzjIN2L`V5w78VwZYilMy zi*s|ILRTx4M|2PfbhNlGcoRS+Swq9G!+#q==5-qzBUy5m2_J=FOYa;Ij(^0NE4@21 zb0N!lifG@H#Nx3rzOP-6f5#g8r0Tq=MS0>|@vA8ZA1R&JQmHd-x_~d-8K9MGG8E`G z5IHYGubH7Yb9( zk~9DkA(%%c?_^vW%kN00#t0q+)gqUt>3;KA9NzS!%28nNn=?-zW~XYk{^cM;mn6SB zV1`O6R02^_=%kMmpqYTCCQ%-%Nk0HjSJ&1u)rxhi0bKd|^(!_3K`5YN%V0Q3X=o53 zq7(W739q@ozkl2DI6H?47nO~fozPNY8xe8Zy#Y7RTViRyHOBtMmC69~(jV>*rjk=~ zFidULUDN$FBhO!irT?hff-|g)!Awd?}LP~JOi|pa$Jc{D1TV13u_rNyx z8pRKRg_|w84&!Zq+uu_vWo(*bxlIrEBtVsP11{N7CyDKspDMiaec(}B?ZZg2b-bl* z|J~SMGt0aYq5QVk;R_B}`f&kl%udk77VxG1CkxODMw+swCaU7#ddttJ^2tHK51f>b zM!6gh^%SocZEbMQiv??xUIw$_*=?ml!$6e-U8Q!D6zlf6w<7DL01Rk@q zvjdfroyaZ3a`T4&1m*o#Y)Y4oKKwRb8xgfgRKX^(9dM#v%w~=Je{?8wEHR&><(Ku> z+xdySw&b5aeJTO#P^zdeIq*>%(h zdKhAzVpJ@kan)$sjDizlH4|x(Iv7~Dau%EgE*S_Tz(L|!^=#Oiq)J%fS{vBwWlzet zcN8`xth)%zwUa&4#O!#Oc+hqhnncsx*NMP=EUutQ_2wi_$ zh?SqzcWpu}Z=8B#9>I{I24!Cy;{I(qej&kggu*NOb@bOjm!-<}uy+{x5B zF=CV;vtghqh$J)1R_2S$Grt3W)a2MfI6OvhZ@4d%&M(1#g7D|U;cT?ENufj+QhRRR z6-_3ggk5Fv)@e~|Mzp!!QRYC5wEC^TmwQ$b(sYtI6Vc*TMa_eCR+Gmdj&DT~)0iqC9-qy9Qa?zxo}6YxpV`=0>I z8*s7?C{LLo5Vp1B6WTu$>n&6>fcnzb$_B>0yVA2bLT(GZ*}1t= z^I>Wx?dlJJt1fBHBY!!V1-F*-I#hqBL9A|19K|D4nvHU{E=`dmhBJY4ViDY85V#Q# z9GjR}Npqcl?(WWif4##B=+FvaQ0k?I2rJ_7JBh=h@OQ16tBx_N)82$qu>Sfadx+Xm zl5l__^!3RBy@2+~6Q~n`-}QEpc;KF%1{bx&*w|5|N;9uh25HqXts%7r9(v)?E^tLa zaRQJRIF5wE>{?DC-8+EJsdMy{Ud;|D4QTuSL6e_bwQL)LpUB34sjp8RjxF<-rsYVw z{j@W=JS*CV#;H{?D6IEOH@0CwWkB2S&s!2q0gaV(ap3_t2L}hofMcx(&|#uD>dDSW zcj9*0FZ?53qDqLkN>~WxNf(RaZAN=*BquGbV;p}2^ns0i9#g0O_V?gzj_(FxhUv~`_7sx!Pw zEDAYt?A>{*{_~(xM-qRZYNIW2{jE(zgMeJTwI*=t5^Pj z`+#9nX}d7LZwACf41Y05g{J(C5c879zVsiK1a#oAurM)D%;@A2ArurD=c00fmy{^+ zF?*LPfpGCT;AgRsSC? z8@&Y|0`y4x-AVSc8vuoN5=DJQ#Kpz?Y$me)(y!qGOi{;2h7VZ-K)das-!F_gNobbSDruNfzJ5* zwc_wpp*$Wcf-y(ZumGq&)0(4K)f^lgcA$!Sollw3)6<)u{BJsT@rvDa@6A&B{dda| z{oW1^h?u4>#(2)-r4Fjhaqb=)1d5FUTx9rqN5o4{#S;uqzERX_IenY^G9V7Qxw{h; zj#^YeEoPCMUCuIwpmBIa#AOhW+S~8SmP#6DiJ#qF6FO;R<1+WlDBcX%&zu=MJhtn& zUA4=~h0z)k9Bq)4`r4%WN5n>g)fzbXZn+)i=sCHQJY6Sh~k&dCw&! 
z5urNvhY#53qT}D^p_-zd`~TO8Z9!4jGjBF;YE$)7_F}b&dbGwhiK&*toccm%~U?AVyJLQ={QM z5Z`Un*c@L5Ea`ljtdDrZspD72a#ZJLs_NcMDt{|(S#!iiGTDC_5@Z6#$CtLuK0ZE^ z6_yFz*C6Z=VT1~zHcsZrXpP?8kh%eYHsVDl9~U3okf_P-Kp+|EeSwqmVM{t*?#R+xPKc@&p zSU6Z%Huq!WPQ?Ml)riVy$~=;Bp;@@%gQlp|XHOuD`JBPuf2b`@JvYFglm7R^VJ<*k z%;dHmejgVgnXZRje-qzX_Il}?Eg7O8pDpTd#qhL$-I~J($g7VrF@Y?a=&dl0~DoHnsB$pql_^j1O+=W3my>sFRH{64UF*#sHT8`F!_GgKj{VvY4Uta~-V z7f+oeB8zjg9%}%&L<6#EAQ+>PhX>Rtx~xSU~xpEuWwc2vh}EKvmHHB~oiv6)0^+Z^`my%z}(FcM2%grSC@R z+J1ucuw+(CD1}d$@9hc&l#sZ8&J5LN=>mlu8p2QfO+*eyJ6~X=gkN2Pa^q zBzt|FMl6yWtBd-#Y=XszO0!um+R*^GY?^UWho>mNdM}1Ei(19{AD~Z?C!zrb%)~Hw{cT<)A=>Qy|bn3 zOX~IZi##@JTo6cOKfNTFaf44oC4LzecE(TVR;;C_rbYsmmest!w-*Jp_5A2jG5G4t^`*qN^#PXNw2YbOTjWJy*g$cTWo-!h zggbZ2n3R+RWxChrdl6vizrARBFp=vdKeTs{@Dxb#JmujDBOZ`bgM&c*O5leCp6yJm^d)gZW2r!-Y5tDa z;-l_yiiwO2lJjLX>+8>O4UZ0`;dSZT2J&5DAaQ3F^^FfaV%947D?Kto^X)3~Db`8r)20$aMQTwJcvPdh|^aM$l|E1h)7gkR8M@{flFcB7XY7lGOF>gB|c zv>W^6X$XrK6STF2%$|og8cn~kJ3~@-sVZgdk|%5Ods#qe1U4s95akmp>>_FpZng2( zs6e?lty=}G1H?m?fVaiU$ekTMG-ab8HJn}+q2!mlkM>d}h(MLBpPP$K)FYP|pobE- z7hF3Pm?H7eG>}#r@Tov{Rc_qV-94$^wOUyA1+~?Auvm^0q*;*Ybg3d;eJ_FCUb|VQ77|hub;r+OY zyu%uN&1PUrIw7KdRFC_#zRpi_qAyxL@Ddd%0n7+>wO<;Ir_(?`F(Xa zU-(bd_3|Q$NaK#xio$1aQYiL;dNl9*;H`k94SRuI7k=1tXpSEJd1*A({rkJ$ez1op zaU7|v1ugkBMnfHgv5~^EC5cUM^WF@p;9(l8E`sBW%NsdfH_OvDpZZX_)!SlQ`qpoa zWVTO11@8gD1GY6o0)f$$1e($gcXa}aQ}p=7m6e)b^xqL6vw3fpgXfYGhi9;&)UdKD z$18Kh&K=fay}(sWsnppA=c7MNd7^=JJZk)@l)KUW2jn1+ni;3Y_{pqPLA-lF@HX^P zmGL_2NXj|+Zz6P#=BsVg*=S*HsON5ALB1+S_VHyI11d(qdOgW#mhVw%DzMCEwr6X5 zD^aOuKb^K$s)%OklQ22A9kc5k4NTfZ!p}tWRlN+gs3zgHc>(O~M2AweG=_x^+HsKq z*LEOGE)EBYonplrt_^Fq{W4mk)+0TW7FTnIsm()Og2l)(8g$4^wFU}ArNGWiRBm?m zig+BvjQCc$*jgk$U&*iQ2GKyX3EPcr);aZzbRXDC<}M9jg$2P;`;7rIQc<5&dxij` zE%QNgt?=`+_UTupmkTw9w{|sneh^}RWR0TXU(P5m3!Y)kbr|nX4M=8pV)*{3Tb4<) z7rAJ(V`Zurg~j2p5jCm$*6-&DZ1u98hK2@qgI0Jb<R<_^5y5N#gKfPIc4QN)ORkA$-|De4w+!1S?*(={lH1}; zTY}wB%}9a5eN|@F?}z)Dr0q!Wk9NP<0R_~X$y6$#tPo@!iOt@BwG*l%*84{A^{cLl zIx8+DuOX1lQ4Co~XK$e`&XKjhAp7S}ToCC6odIFd(VgdeGyVHjz>k{+w#VW5x#Wu% z@c68{=oQnI;j`5|m`}+__pEyL=g;lVPmWp`^Jot5VlS4a{0$_t_7{Mo{IbbpsSvK} zqt4V&r!v#ZrbFdp5sy*lfnrs|?#3Cn1G{MDl06dYj*#DOLcDybar$VJxv3g=i5FyBCucM z29~;zXv8PgmUD-d^0nTl7*KuVy8Vst`$XaR&7oJbGcE|!JBU`p2gs$_tb#AOF1RC$owTz98^+B-Dv`P)6I$8n{LZCh@V!GGytG4ragnoh- z)HnwYPSJMz%NzcWjSTHd)%|TbWbEoxc-P-qXCXeYX5o3-zA;pN(2v}G7FZ+b7gM7I zsVBv)$aajCjL-uDUKgWlrp54W^Ndc=6kNbjj15P%-6HXx@<0KCHi%eO=6y^^Si|`W zTnDJqxwzO82%IvuIz@eb%4l-o>HRC_ez_fF%{p}lC<8hja4H^F)pYH1^F7tqF8Fe2 zEr}zuAN4|2HMh0vDO$;nB*Hp6Iww*4i-*ueo2!|di0sd=*n~Q@k$3fvx;4$>G z`9JZ8l+UpnO&SRw?L;|PHL0UsHSmuCl)ZR%S2YZD*+EQH2`s^Zd`Y&KvdY!Es-bBU z&+G&St4GFYnjZlW`9fQNV{@o=T9y-%$YV_m4-3O%GaD8`z`C3nO5O_83HgA1Ac`IV zNNogiNrWO^ZYf<>2iZv~d7gc%*NIrnR0GzIpmoyN)HJyuCL)5=9r^C&Ou6gdgS~2duU%4aOgiIB!ucVcr1Y*hP{8^3XP8@^d1dK zU=fMjG{a%(FEo0qX2c#`u{1X4aHd5Q;qv07E1 zuuj{n$$bdt+TtoI)8v}RZn9owbp*F+ruOAYY8K6!|7wqq+*=Ce4{TlB>E8QHRP0y{ zS=`tN)v0qyDgYiH$MqA35ON#%8w2?+bl-4BU1I#;-}BEe*R60AHhM5-Z04<-`|E-M z^kSEEJ!uPa^ZjH~@(D2xkqDud(i7yruJOy3D_^Wri93AbCEq3rOMjk5jtSSx5Yhn9s9yJ{%L9l%T7h4+n#UdfydZ1&(u}S+dDkB@Cd5|Ngtrb*z*i4 ztgwMG7@%QsNe{-Z7`t!-h9RfI;{gluS;Q-WNVUJPcQ!CTDmC{Kw8*tuU`~*KAZ6jz zpP8DDCMwLcP%?Ok;<)|zHmX%~l-y#UMLcqMJ1}$^4x55r5`>?-BLsj+R~Plgbid=v zz1RMJ?UAllLWM9~xprRXmbT7^2BkNamqugBO02x-5l1a0EFxiOcoLIeh3omvSd3yw zi$;-Vqq#Ee@!FHfK#wFyDfhppyY(p}ed_QdljF5k#L<};!8VI_L{Te7C#$b!<9DM^ zmAdHLg5S!Sb}C`Z3!hhgRS14vGsc(nI;yt=RXwaU?EI)1d=4Q#jf9%0V%gsOm4`2+ zlHYMTo5;VJ8;s|#9mJ=$JukkMle@$E-WHxo_{cl*jsg+KVG$v7gEGjX2Pd1wKcf;6 zwp=(sQ>w(JokuI=HB=|nG+o)!pkLUE>qkr(z_TZU$S78Ojw5a4Ke 
zE%ObY7%egZUg#&sk_W?&?CQnBL2JhF_wfeg5!4s-PAHO@vTU@|9xw7_v9}Cv4nJ5M z?1foZqUYs3E!GT=ovKZ?+F|I_8ex<+{N5eH#uX)R22FxXW?3e;=1LJQ&x**cGIA4R z087A3g|ncr@VUwO%X$tQ(sae>W-y2+Jqz)s#`c<9=1vh)%Y zsLZC6r#;xsiwX)+hVWi4DVV@1%;tBmzh&(AQKg4mhsdxqbs|Y*c`ve--qk(1D2Fx) zcBVKqQ)6g|OI(WBX@P(Z~339#!o+w9U=Zw$S0aNS~w=B0>A-9>8y8(OmV6 z0~<|aE@xu&ZP1bZJt2ksd;lv1f_nBW6KNcUkZXX^Yibvs~A$=fL1FdSmpddbByPtL)~omQG<3R--5V`AFJUBO1m({4_=S)K&oly`J_d z!W`Y*q`=SIf|G4YzJ(tPzpYuNc!8vApl}*!i(J~X76M%>tml2tdy>g^mv&FNwP9Ou z*v<@N7PUc>)sq?;7s`R0$(;^z21^%8er=LP;Prz@X23sA7_J>Zvc2iQ-8P_~1sD0; zFaIP8r#kA-t_Pt5t9`!@KU9t+^-O;RXTgQGLJ10;m9tGg0TGE)&QBNGZyJdhhkLS! zVVqi%+g5|R#!j1ZCVSG-T}ZJ0aFb;OL@hP;*M6Wk!978gdSF*Szf9Qk<8UTtTm?;O zXxV5km=QTt4Zn~pL0d?IoNgCpTL09?HVK!?pDuXkcI$c!V3!71Mf+=FmX}km>GYoe z?(YDsy#H|l>hcV4{%7MzhxbH*Mj$XYK-UN~=3@?=o~=~z%nzwFcWcI!e5I#{i8Wg+F>q4Vu(}CMS7A2HpgG z(RP3)aPaY=y&}-gTkwkU>jbeyCT?!-U%!5p@Wlsa)lod^Yn#qU1%1yzvxH+*EdYBi zbd(shK^q`JO0L6r0ouj^Voe~v?+4;Q&`w{g>GJNJ6BR&SyFi->^i-W%PC*l?)PLJ( zfp|=xZ0@d_+l$H4ldnv*t>G_pjbmtJgt7Ct(H84ntWy^vMeg2i-UQNtAYk0Qu&`jR z2m0W1a_A&@g}|l?zmaN7ONY&C{$%1}vAr{0p-5XC1g=xLp^h6!9ExWSJu{Qidu?gv zIPt5((qhZ1_hR+CrGW3f5a>ZzTwVrydO*NI!Bs-D&Y)Kq8#Ne2N;#)7H*0_JfL$3( z?@UgTHPd7F7M^MqgO#S+PQkM2sVwl4-qPdI#c=%9J5sT?pa0NQ-4MXJOzF;EY!dR2~~fpgB%iS}Bw-KjT&ae@{<0+^&+0&@ zhMYw~e<{zk@bUS1CxFXQ^;5NU-WU_)T~!kof92c??z+RHqJlLGCBX(G26LdLLryml zMoe5BmPM!5xLnW9uH=<~KPIhfJ0?hP?$;eOx^y`Vxr#;%o<-+9G_C@Ippy-ryb2j2);|vbMGFT?JLpO!LIztUfWt(9Z)cozq0W@ODH;e$ z);?_jvA#vn6JZ^_w)ktms}Qtpo;0qs{r|vB5GN@GiPA`7Zm1g$)m5QkqEY@UAa{e= z4ARR(?v4zJBmOZa)>vjR9bKZr%+j*55?)^Q9ed|}WK(E=MkUZ2r&6KWTJs^w*t2hz ztpB-v3KB{+X%Jr-+&@`lQ2k%+hmrlAoLvy4Taj9&d;l8(kQVB5gVOT11>+jvC#(E< z>WH`+{l8bJW9wxB&;)_-&++PksBn8}@8;%KUNZ>tfuJq<*C!lKY_mU2cLxKj1#h0Q zdtKVaLN|f|TctD;q>5yNrZ8k}|Np)TO{&16Cg-~+YJ~g_v_e8cpn(hQ^MOM^uwnk> z5y%6Fg3t!o6(c2b*#)GZff+8rgm3iwyE54Q!#UEJk%@m}brl?zut)1zoMFfN$I*|h zpBov`toJ1aZf=?bdpFbf;eN8pHp4)WptqLUj3wa=2mm<5#Hav(0vFy8(8Lhy$av1R zcDWRusALWk)VEq%vT1zw0bn5J6zMi>n-}sBu^Ymlf$b**HM8Vhb|;_j&DO+vfc#%x zZn{TL$vrTaK7;)u&Vb*g0-ot_utURluw#UvLODWwDoAoLKt9W4JEW!}0DHRv#2=pt z3Z{&hbNvQ@k0I}kjER|mYD*pxp^E+@@pV80+9yz#7L)*OQ zfz|m8#OOJlC>ur8wxlau#RVTr1uUz;)>iSpG(iH;YRv`$ zntSHVnun!*z%{iy-_=-MUtb2(?0aEh2gqNO7McXeWVeE^4nbakN<<``z+`SZX~(1i zq)C5|k1qph6IkQHAfLUmwp$J}k^G9WrX3WMrL{E#use#-+3(kc0#5Wkpk+wPau}2^ zmN=P@pFhJB5D>s4AsGR~5$ajW6}5c?$qi_057=l01S!ZM`w10K%)p)xV6OzQd?+a? 
z`C*Xou;~sckk}O_m4E65i{*UelWC9RK~RNf8@*H1Oo}q{;uuX7{!bI<9uH;O#_=K5 zWNi*BAKFpEA`Q!|L##uDNRjedrnW|u!>BZJ7>6919BSgtQrn9~qji2IXO&E&LDZ6V z%2aD?O->b(*ogP{^w<0O%wO|-o|*f3p6j{p>%Q*a@9U*X#3J%PW*~vd0_k#(I6H#b z>?1Prs{AwcePDgQMwe;CCG6zw9r%4W4-paxNW;UXz3{YM{Wv+~X>MkQ-L`XL8124O zJ6{eOkTv}QId`Wsalri&wfN}6{E#W5En9fEZr?5kRpd%&aBvZtZM_+mBdSLRt!!-z zTG=yt*ZPCY@SsBP=WKr5bx}vNn#uU<7W*;oPPtf2=gl54X1YH-U^IcDjw9*uLYd6X zP~p5Cx>Klf{@!B*fcp1ArE3WV#$%l<8^?@$hYe}8Y;ohUJYKH2q^gR$bLY;hFu;BH zOE(ey`w|zY(|8TLd>g|wR<@4@L0LtFH3sMgGqo2Hp6SXuWw2PJwT{=mx&>B=p`KnU z{>$6dl?+iIwW7KCX@4tD^2V;r$|P2Fbg8aN#&8fg9IDPI>xefd)bDa`u9qN9u@o33 zsZ{Db`|d>^^rAHi&#q8|XfcoyXJ8gqMDhpY$^N0j zjk{)${ec)Dfd=AmI9gCL0B<4yU4{#jCgGp+ zZ3xZvza!dwmKY?|;b$RRhbY?Qa0i)aq9P=H+v2T=CTf}O332@z(i9LeMF6Ux(F?dr zCqdsB9}g9nS{@V2!3DiNZEbP%T%a z&Ec(Q%SanJ@wxOsQ(VN8IJTWh-%NhNOxRt)9}$v#2*~7sKi*L~n&eY?4`LclWe~1-Re{H9 zToHUEHVL0p`3xmgmJqzbH`v2ngnm48f79_{AQlFfw}u^*r1ZS@hQv}~)r#2ISej7h zA{o8Em6d;U!O=SY!(GqjpAYZGZ+j}{!^RxS)QjE z+;sfDxrN11?qHZjHuCYLbwMV5|Ne~hu8|pKQtanXk-|Cg+bDY7(FdL*cjD%Z-E>u; z3DwrtE*)?qJ@TO6jGZ-52Nvhbt?$Q%u zzmmTH(4mboMSBdK=+&3FE;_(tZL&+4?Zqi)8{zBuhBQ7lrYz3)(C-esp%GzBuTMuk zXvXviTaxG>uoy`X7U{P|PKX?8p74M`*}Z#rbNI0A)&5b>&iE;b4BGWYd&~Fu`jYDg zxq-9{S6%H7e$C2Pv^}!n51IBfnyj`Xn(AVsI_%!~osd8u3td{5 zdSy*b8tw!lkM^;=tV{_l5mi!B1M|u64GBw&Y~lN4z}~28dFJ{SiQ)Vs5yq}nvtQuyuTn9(TZvj8%{nJkZ1jWRxW4ZTLu*gVS7hO%v&2vSG&@grf zssugE2cydzA|X>dyQQ9%fBE>_eL3B`UPl`p^_-lXlG`Y${}b7}#~{Zv>hF#<_4W1J zYM$sQkr!EQS8m1sqcR;sp;v!w;kr4&Z{zo|iJ;C~Ur=`Osf7@Mu^_qr7o72yDa zaVIq>lnw-Te_tO70^U1AStwYFs;U%7*W>?TaRmNcE|-B>#ehoXxbPedb5jJvNrPcR ziA0OCw)6XuI25Sr7hEu`qLvm7C%L9)7r>S&LX|@3tE!Z^vztX+|ugWdw{Acf0559LITfr)vbp+%Q|vAr_XXj);>^~FpDaa zSk?tjPNz`jpyc!3vu7I;%C&dzj>~H03kyTegv8DmuD>9E3|FmN?c=VJ!Fk1Ei&K4Q zS9EW_@W5iW2*$v@2C9K(>qB7tnTRG-Nmv!ei`dY!xUgFCWs{SxhrFJRJ=^?b0&-fX zAOsOBqi2a(LxsQz47 + +The simple example above is a function with 2 input features and 4 parameters: +$\theta=[0, 0.2, 0.4, 1]$, which are the function's values at the corners of the +input space; the rest of the function is interpolated from these parameters. + +The function $f(x)$ can capture non-linear interactions between features. You +can think of the lattice parameters as the height of poles set in the ground on +a regular grid, and the resulting function is like cloth pulled tight against +the four poles. + +With $D$ features and 2 vertices along each dimension, a regular lattice will +have $2^D$ parameters. To fit a more flexible function, you can specify a +finer-grained lattice over the feature space with more vertices along each +dimension. Lattice regression functions are continuous and piecewise infinitely +differentiable. + +### Calibration + +Let's say the preceding sample lattice represents a learned *user happiness* +with a suggested local coffee shop calculated using features: + +* coffee price, in range 0 to 20 dollars +* distance to the user, in range 0 to 30 kilometers + +We want our model to learn user happiness with a local coffee shop suggestion. +TensorFlow Lattice models can use *piecewise linear functions* (with +`tfl.layers.PWLCalibration`) to calibrate and normalize the input features to +the range accepted by the lattice: 0.0 to 1.0 in the example lattice above. The +following show examples such calibrations functions with 10 keypoints: + +![distance calibration](images/pwl_calibration_distance.png) +![price calibration](images/pwl_calibration_price.png) + +It is often a good idea to use the quantiles of the features as input keypoints. 
+TensorFlow Lattice [canned estimators](tutorials/canned_estimators.ipynb) can
+automatically set the input keypoints to the feature quantiles.
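+As a minimal sketch of this calibration step (not taken from this patch; the
+training array `distance_train` and the keypoint count are illustrative
+assumptions), a piecewise-linear calibrator with quantile keypoints could look
+like:
+
+```python
+import numpy as np
+import tensorflow_lattice as tfl
+
+# Hypothetical training values for the distance feature (0 to 30 km).
+distance_train = np.random.uniform(0.0, 30.0, size=1000)
+
+# Calibrate distance into the [0.0, 1.0] range expected by the lattice,
+# placing the 10 input keypoints at the feature quantiles.
+distance_calibrator = tfl.layers.PWLCalibration(
+    input_keypoints=np.quantile(distance_train,
+                                np.linspace(0.0, 1.0, num=10)),
+    output_min=0.0,
+    output_max=1.0,
+    # Longer distances should only decrease predicted user happiness.
+    monotonicity='decreasing')
+```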
+
+### Controlled Flexibility with Regularizers
+
+In addition to shape constraints, TensorFlow Lattice provides a number of
+regularizers to control the flexibility and smoothness of the function for
+each layer.
+
+*   **Laplacian Regularizer**: Outputs of the lattice/calibration
+    vertices/keypoints are regularized towards the values of their respective
+    neighbors. This results in a *flatter* function.
+
+*   **Hessian Regularizer**: This penalizes the first derivative of the PWL
+    calibration layer to make the function *more linear*.
+
+*   **Wrinkle Regularizer**: This penalizes the second derivative of the PWL
+    calibration layer to avoid sudden changes in the curvature. It makes the
+    function smoother.
+
+*   **Torsion**: Outputs of the lattice are regularized to prevent torsion
+    among the features. In other words, the model is regularized towards
+    independence between the contributions of the features.
+
+### Mix and match with other Keras layers
+
+You can use TF Lattice layers in combination with other Keras layers to
+construct partially constrained or regularized models. For example, lattice or
+PWL calibration layers can be used at the last layer of deeper networks that
+include embeddings or other Keras layers (see the sketch at the end of this
+overview).
+
+## Papers
+
+*   [Shape Constraints for Set Functions](http://proceedings.mlr.press/v97/cotter19a.html),
+    Andrew Cotter, Maya Gupta, H. Jiang, Erez Louidor, Jim Muller, Taman
+    Narayan, Serena Wang, Tao Zhu. International Conference on Machine
+    Learning (ICML), 2019
+*   [Diminishing Returns Shape Constraints for Interpretability and
+    Regularization](https://papers.nips.cc/paper/7916-diminishing-returns-shape-constraints-for-interpretability-and-regularization),
+    Maya Gupta, Dara Bahri, Andrew Cotter, Kevin Canini. Advances in Neural
+    Information Processing Systems (NeurIPS), 2018
+*   [Deep Lattice Networks and Partial Monotonic Functions](https://research.google.com/pubs/pub46327.html),
+    Seungil You, Kevin Canini, David Ding, Jan Pfeifer, Maya R. Gupta.
+    Advances in Neural Information Processing Systems (NeurIPS), 2017
+*   [Fast and Flexible Monotonic Functions with Ensembles of Lattices](https://papers.nips.cc/paper/6377-fast-and-flexible-monotonic-functions-with-ensembles-of-lattices),
+    Mahdi Milani Fard, Kevin Canini, Andrew Cotter, Jan Pfeifer, Maya Gupta.
+    Advances in Neural Information Processing Systems (NeurIPS), 2016
+*   [Monotonic Calibrated Interpolated Look-Up Tables](http://jmlr.org/papers/v17/15-243.html),
+    Maya Gupta, Andrew Cotter, Jan Pfeifer, Konstantin Voevodski, Kevin Canini,
+    Alexander Mangylov, Wojciech Moczydlowski, Alexander van Esbroeck. Journal
+    of Machine Learning Research (JMLR), 2016
+*   [Optimized Regression for Efficient Function Evaluation](http://ieeexplore.ieee.org/document/6203580/),
+    Eric Garcia, Raman Arora, Maya R. Gupta. IEEE Transactions on Image
+    Processing, 2012
+*   [Lattice Regression](https://papers.nips.cc/paper/3694-lattice-regression),
+    Eric Garcia, Maya Gupta. Advances in Neural Information Processing Systems
+    (NeurIPS), 2009
+
+## Tutorials and API docs
+
+You can use [Canned Estimators](tutorials/canned_estimators.ipynb) or
+[Keras Layers](tutorials/keras_layers.ipynb). Check out the
+[full API docs](api_docs/python/tfl.ipynb) for details.
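+
+Finally, here is a minimal sketch of the mix-and-match pattern described above:
+a small stack of standard Keras dense layers whose score is passed through a
+monotonic PWL calibration head (the feature width, layer sizes, and keypoint
+range are illustrative assumptions):
+
+```python
+import numpy as np
+import tensorflow as tf
+import tensorflow_lattice as tfl
+
+# Hypothetical dense feature vector produced upstream (e.g. from embeddings).
+inputs = tf.keras.layers.Input(shape=(10,))
+deep = tf.keras.layers.Dense(4, activation='relu')(inputs)
+score = tf.keras.layers.Dense(1)(deep)
+
+# Monotonic, regularized calibration of the unconstrained deep score. The
+# keypoint range is an assumption; in practice derive it from the data.
+output = tfl.layers.PWLCalibration(
+    input_keypoints=np.linspace(-2.0, 2.0, num=10, dtype=np.float32),
+    monotonicity='increasing',
+    kernel_regularizer=('hessian', 0.0, 1e-4),
+)(score)
+
+model = tf.keras.Model(inputs=inputs, outputs=output)
+model.compile(loss='mse', optimizer='adam')
+```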
diff --git a/docs/tutorials/canned_estimators.ipynb b/docs/tutorials/canned_estimators.ipynb new file mode 100644 index 0000000..afd4bcf --- /dev/null +++ b/docs/tutorials/canned_estimators.ipynb @@ -0,0 +1,722 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "7765UFHoyGx6" + }, + "source": [ + "##### Copyright 2020 The TensorFlow Authors." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "form", + "colab": {}, + "colab_type": "code", + "id": "KsOkK8O69PyT" + }, + "outputs": [], + "source": [ + "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "ZS8z-_KeywY9" + }, + "source": [ + "# TF Lattice Canned Estimators" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "r61fkA2i9Y3_" + }, + "source": [ + "\u003ctable class=\"tfo-notebook-buttons\" align=\"left\"\u003e\n", + " \u003ctd\u003e\n", + " \u003ca target=\"_blank\" href=\"https://www.tensorflow.org/lattice/tutorials/canned_estimators\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" /\u003eView on TensorFlow.org\u003c/a\u003e\n", + " \u003c/td\u003e\n", + " \u003ctd\u003e\n", + " \u003ca target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/lattice/blob/master/docs/tutorials/canned_estimators.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /\u003eRun in Google Colab\u003c/a\u003e\n", + " \u003c/td\u003e\n", + " \u003ctd\u003e\n", + " \u003ca target=\"_blank\" href=\"https://github.com/tensorflow/lattice/blob/master/docs/tutorials/canned_estimators.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /\u003eView source on GitHub\u003c/a\u003e\n", + " \u003c/td\u003e\n", + " \u003ctd\u003e\n", + " \u003ca href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/lattice/tutorials/canned_estimators.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/download_logo_32px.png\" /\u003eDownload notebook\u003c/a\u003e\n", + " \u003c/td\u003e\n", + "\u003c/table\u003e" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "WCpl-9WDVq9d" + }, + "source": [ + "## Overview\n", + "\n", + "Canned estimators are quick and easy ways to train TFL models for typical use cases. This guide outlines the steps needed to create a TFL canned estimator." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "x769lI12IZXB" + }, + "source": [ + "## Setup" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "fbBVAR6UeRN5" + }, + "source": [ + "Installing TF Lattice package:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "bpXjJKpSd3j4" + }, + "outputs": [], + "source": [ + "#@test {\"skip\": true}\n", + "!pip install tensorflow-lattice" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "jSVl9SHTeSGX" + }, + "source": [ + "Importing required packages:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "both", + "colab": {}, + "colab_type": "code", + "id": "FbZDk8bIx8ig" + }, + "outputs": [], + "source": [ + "from __future__ import absolute_import, division, print_function, unicode_literals\n", + "\n", + "try:\n", + " # %tensorflow_version only exists in Colab.\n", + " %tensorflow_version 2.x\n", + "except Exception:\n", + " pass\n", + "import tensorflow as tf\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "import tensorflow_lattice as tfl\n", + "from tensorflow import feature_column as fc" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "svPuM6QNxlrH" + }, + "source": [ + "Downloading the UCI Statlog (Heart) dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "both", + "colab": {}, + "colab_type": "code", + "id": "j-k1qTR_yvBl" + }, + "outputs": [], + "source": [ + "csv_file = tf.keras.utils.get_file(\n", + " 'heart.csv', 'http://storage.googleapis.com/applied-dl/heart.csv')\n", + "df = pd.read_csv(csv_file)\n", + "target = df.pop('target')\n", + "train_size = int(len(df) * 0.8)\n", + "train_x = df[:train_size]\n", + "train_y = target[:train_size]\n", + "test_x = df[train_size:]\n", + "test_y = target[train_size:]\n", + "df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "nKkAw12SxvGG" + }, + "source": [ + "Setting the default values used for training in this guide:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "both", + "colab": {}, + "colab_type": "code", + "id": "1T6GFI9F6mcG" + }, + "outputs": [], + "source": [ + "LEARNING_RATE = 0.01\n", + "BATCH_SIZE = 128\n", + "NUM_EPOCHS = 500\n", + "PREFITTING_NUM_EPOCHS = 10" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "0TGfzhPHzpix" + }, + "source": [ + "## Feature Columns\n", + "\n", + "As for any other TF estimator, data needs to be passed to the estimator, which is typically via an input_fn and parsed using [FeatureColumns](https://www.tensorflow.org/guide/feature_columns)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "DCIUz8apzs0l" + }, + "outputs": [], + "source": [ + "# Feature columns.\n", + "# - age\n", + "# - sex\n", + "# - cp chest pain type (4 values)\n", + "# - trestbps resting blood pressure\n", + "# - chol serum cholestoral in mg/dl\n", + "# - fbs fasting blood sugar \u003e 120 mg/dl\n", + "# - restecg resting electrocardiographic results (values 0,1,2)\n", + "# - thalach maximum heart rate achieved\n", + "# - exang exercise induced angina\n", + "# - oldpeak ST depression induced by exercise relative to rest\n", + "# - slope the slope of the peak exercise ST segment\n", + "# - ca number of major vessels (0-3) colored by flourosopy\n", + "# - thal 3 = normal; 6 = fixed defect; 7 = reversable defect\n", + "feature_columns = [\n", + " fc.numeric_column('age', default_value=-1),\n", + " fc.categorical_column_with_vocabulary_list('sex', [0, 1]),\n", + " fc.numeric_column('cp'),\n", + " fc.numeric_column('trestbps', default_value=-1),\n", + " fc.numeric_column('chol'),\n", + " fc.categorical_column_with_vocabulary_list('fbs', [0, 1]),\n", + " fc.categorical_column_with_vocabulary_list('restecg', [0, 1, 2]),\n", + " fc.numeric_column('thalach'),\n", + " fc.categorical_column_with_vocabulary_list('exang', [0, 1]),\n", + " fc.numeric_column('oldpeak'),\n", + " fc.categorical_column_with_vocabulary_list('slope', [0, 1, 2]),\n", + " fc.numeric_column('ca'),\n", + " fc.categorical_column_with_vocabulary_list(\n", + " 'thal', ['normal', 'fixed', 'reversible']),\n", + "]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "hEZstmtT2CA3" + }, + "source": [ + "TFL canned estimators use the type of the feature column to decide what type of calibration layer to use. We use a `tfl.layers.PWLCalibration` layer for numeric feature columns and a `tfl.layers.CategoricalCalibration` layer for categorical feature columns.\n", + "\n", + "Note that categorical feature columns are not wrapped by an embedding feature column. They are directly fed into the estimator." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "H_LoW_9m5OFL" + }, + "source": [ + "## Creating input_fn\n", + "\n", + "As for any other estimator, you can use an input_fn to feed data to the model for training and evaluation. TFL estimators can automatically calculate quantiles of the features and use them as input keypoints for the PWL calibration layer. To do so, they require passing a `feature_analysis_input_fn`, which is similar to the training input_fn but with a single epoch or a subsample of the data." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "lFVy1Efy5NKD"
+   },
+   "outputs": [],
+   "source": [
+    "train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(\n",
+    "    x=train_x,\n",
+    "    y=train_y,\n",
+    "    shuffle=False,\n",
+    "    batch_size=BATCH_SIZE,\n",
+    "    num_epochs=NUM_EPOCHS,\n",
+    "    num_threads=1)\n",
+    "\n",
+    "# feature_analysis_input_fn is used to collect statistics about the input.\n",
+    "feature_analysis_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(\n",
+    "    x=train_x,\n",
+    "    y=train_y,\n",
+    "    shuffle=False,\n",
+    "    batch_size=BATCH_SIZE,\n",
+    "    # Note that we only need one pass over the data.\n",
+    "    num_epochs=1,\n",
+    "    num_threads=1)\n",
+    "\n",
+    "test_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(\n",
+    "    x=test_x,\n",
+    "    y=test_y,\n",
+    "    shuffle=False,\n",
+    "    batch_size=BATCH_SIZE,\n",
+    "    num_epochs=1,\n",
+    "    num_threads=1)\n",
+    "\n",
+    "# Serving input fn is used to create saved models.\n",
+    "serving_input_fn = (\n",
+    "    tf.estimator.export.build_parsing_serving_input_receiver_fn(\n",
+    "        feature_spec=fc.make_parse_example_spec(feature_columns)))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "uQlzREcm2Wbj"
+   },
+   "source": [
+    "## Feature Configs\n",
+    "\n",
+    "Feature calibration and per-feature configurations are set using `tfl.configs.FeatureConfig`. Feature configurations include monotonicity constraints, per-feature regularization (see `tfl.configs.RegularizerConfig`), and lattice sizes for lattice models.\n",
+    "\n",
+    "If no configuration is defined for an input feature, the default configuration in `tfl.configs.FeatureConfig` is used."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "vD0tNpiO3p9c"
+   },
+   "outputs": [],
+   "source": [
+    "# Feature configs are used to specify how each feature is calibrated and used.\n",
+    "feature_configs = [\n",
+    "    tfl.configs.FeatureConfig(\n",
+    "        name='age',\n",
+    "        lattice_size=3,\n",
+    "        # By default, input keypoints of pwl are quantiles of the feature.\n",
+    "        pwl_calibration_num_keypoints=5,\n",
+    "        monotonicity='increasing',\n",
+    "        pwl_calibration_clip_max=100,\n",
+    "        # Per feature regularization.\n",
+    "        regularizer_configs=[\n",
+    "            tfl.configs.RegularizerConfig(name='calib_wrinkle', l2=0.1),\n",
+    "        ],\n",
+    "    ),\n",
+    "    tfl.configs.FeatureConfig(\n",
+    "        name='cp',\n",
+    "        pwl_calibration_num_keypoints=4,\n",
+    "        # Keypoints can be uniformly spaced.\n",
+    "        pwl_calibration_input_keypoints='uniform',\n",
+    "        monotonicity='increasing',\n",
+    "    ),\n",
+    "    tfl.configs.FeatureConfig(\n",
+    "        name='chol',\n",
+    "        # Explicit input keypoint initialization.\n",
+    "        pwl_calibration_input_keypoints=[126.0, 210.0, 247.0, 286.0, 564.0],\n",
+    "        monotonicity='increasing',\n",
+    "        pwl_calibration_clip_min=130,\n",
+    "        # Calibration can be forced to span the full output range by clamping.\n",
+    "        pwl_calibration_clamp_min=True,\n",
+    "        pwl_calibration_clamp_max=True,\n",
+    "        # Per feature regularization.\n",
+    "        regularizer_configs=[\n",
+    "            tfl.configs.RegularizerConfig(name='calib_hessian', l2=1e-4),\n",
+    "        ],\n",
+    "    ),\n",
+    "    tfl.configs.FeatureConfig(\n",
+    "        name='fbs',\n",
+    "        # Partial monotonicity: output(0) \u003c= output(1)\n",
+    "        monotonicity=[(0, 1)],\n",
+    "    ),\n",
+    "    tfl.configs.FeatureConfig(\n",
+    "        name='trestbps',\n",
+    "        pwl_calibration_num_keypoints=5,\n",
+    "        monotonicity='decreasing',\n",
+    "    ),\n",
+    "    tfl.configs.FeatureConfig(\n",
+    "        name='thalach',\n",
+    "        pwl_calibration_num_keypoints=5,\n",
+    "        monotonicity='decreasing',\n",
+    "    ),\n",
+    "    tfl.configs.FeatureConfig(\n",
+    "        name='restecg',\n",
+    "        # Partial monotonicity: output(0) \u003c= output(1), output(0) \u003c= output(2)\n",
+    "        monotonicity=[(0, 1), (0, 2)],\n",
+    "    ),\n",
+    "    tfl.configs.FeatureConfig(\n",
+    "        name='exang',\n",
+    "        # Partial monotonicity: output(0) \u003c= output(1)\n",
+    "        monotonicity=[(0, 1)],\n",
+    "    ),\n",
+    "    tfl.configs.FeatureConfig(\n",
+    "        name='oldpeak',\n",
+    "        pwl_calibration_num_keypoints=5,\n",
+    "        monotonicity='increasing',\n",
+    "    ),\n",
+    "    tfl.configs.FeatureConfig(\n",
+    "        name='slope',\n",
+    "        # Partial monotonicity: output(0) \u003c= output(1), output(1) \u003c= output(2)\n",
+    "        monotonicity=[(0, 1), (1, 2)],\n",
+    "    ),\n",
+    "    tfl.configs.FeatureConfig(\n",
+    "        name='ca',\n",
+    "        pwl_calibration_num_keypoints=4,\n",
+    "        monotonicity='increasing',\n",
+    "    ),\n",
+    "    tfl.configs.FeatureConfig(\n",
+    "        name='thal',\n",
+    "        # Partial monotonicity:\n",
+    "        # output(normal) \u003c= output(fixed)\n",
+    "        # output(normal) \u003c= output(reversible)\n",
+    "        monotonicity=[('normal', 'fixed'), ('normal', 'reversible')],\n",
+    "    ),\n",
+    "]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "LKBULveZ4mr3"
+   },
+   "source": [
+    "## Calibrated Linear Model\n",
+    "\n",
+    "To construct a TFL canned estimator, first build a model configuration from `tfl.configs`. A calibrated linear model is constructed using `tfl.configs.CalibratedLinearConfig`. It applies piecewise-linear and categorical calibration on the input features, followed by a linear combination and an optional output piecewise-linear calibration. When using output calibration or when output bounds are specified, the linear layer will apply weighted averaging on calibrated inputs.\n",
+    "\n",
+    "This example creates a calibrated linear model on the first 5 features. We use\n",
+    "`tfl.visualization` to plot the model graph with the calibrator plots."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "diRRozio4sAL"
+   },
+   "outputs": [],
+   "source": [
+    "# Model config defines the model structure for the estimator.\n",
+    "model_config = tfl.configs.CalibratedLinearConfig(\n",
+    "    feature_configs=feature_configs,\n",
+    "    use_bias=True,\n",
+    "    output_calibration=True,\n",
+    "    regularizer_configs=[\n",
+    "        # Regularizer for the output calibrator.\n",
+    "        tfl.configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4),\n",
+    "    ])\n",
+    "# A CannedClassifier is constructed from the given model config.\n",
+    "estimator = tfl.estimators.CannedClassifier(\n",
+    "    feature_columns=feature_columns[:5],\n",
+    "    model_config=model_config,\n",
+    "    feature_analysis_input_fn=feature_analysis_input_fn,\n",
+    "    optimizer=tf.keras.optimizers.Adam(LEARNING_RATE),\n",
+    "    config=tf.estimator.RunConfig(tf_random_seed=42))\n",
+    "estimator.train(input_fn=train_input_fn)\n",
+    "results = estimator.evaluate(input_fn=test_input_fn)\n",
+    "print('Calibrated linear test AUC: {}'.format(results['auc']))\n",
+    "saved_model_path = estimator.export_saved_model(estimator.model_dir,\n",
+    "                                                serving_input_fn)\n",
+    "model_graph = tfl.estimators.get_model_graph(saved_model_path)\n",
+    "tfl.visualization.draw_model_graph(model_graph)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "zWzPM2_p977t"
+   },
+   "source": [
+    "## Calibrated Lattice Model\n",
+    "\n",
+    "A calibrated lattice model is constructed using `tfl.configs.CalibratedLatticeConfig`. It applies piecewise-linear and categorical calibration on the input features, followed by a lattice model and an optional output piecewise-linear calibration.\n",
+    "\n",
+    "This example creates a calibrated lattice model on the first 5 features.\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "C6EvVpKW4BbC"
+   },
+   "outputs": [],
+   "source": [
+    "# This is a calibrated lattice model: inputs are calibrated, then combined\n",
+    "# non-linearly using a lattice layer.\n",
+    "model_config = tfl.configs.CalibratedLatticeConfig(\n",
+    "    feature_configs=feature_configs,\n",
+    "    regularizer_configs=[\n",
+    "        # Torsion regularizer applied to the lattice to make it more linear.\n",
+    "        tfl.configs.RegularizerConfig(name='torsion', l2=1e-4),\n",
+    "        # Globally defined calibration regularizer is applied to all features.\n",
+    "        tfl.configs.RegularizerConfig(name='calib_hessian', l2=1e-4),\n",
+    "    ])\n",
+    "# A CannedClassifier is constructed from the given model config.\n",
+    "estimator = tfl.estimators.CannedClassifier(\n",
+    "    feature_columns=feature_columns[:5],\n",
+    "    model_config=model_config,\n",
+    "    feature_analysis_input_fn=feature_analysis_input_fn,\n",
+    "    optimizer=tf.keras.optimizers.Adam(LEARNING_RATE),\n",
+    "    config=tf.estimator.RunConfig(tf_random_seed=42))\n",
+    "estimator.train(input_fn=train_input_fn)\n",
+    "results = estimator.evaluate(input_fn=test_input_fn)\n",
+    "print('Calibrated lattice test AUC: {}'.format(results['auc']))\n",
+    "saved_model_path = estimator.export_saved_model(estimator.model_dir,\n",
+    "                                                serving_input_fn)\n",
+    "model_graph = tfl.estimators.get_model_graph(saved_model_path)\n",
+    "tfl.visualization.draw_model_graph(model_graph)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "9494K_ZBKFcm"
+   },
+   "source": [
+    "## Calibrated Lattice Ensemble\n",
+    "\n",
+    "When the number of features is large, you can use an ensemble model, which creates multiple smaller lattices for subsets of the features and averages their output instead of creating just a single huge lattice. Ensemble lattice models are constructed using `tfl.configs.CalibratedLatticeEnsembleConfig`. A calibrated lattice ensemble model applies piecewise-linear and categorical calibration on the input features, followed by an ensemble of lattice models and an optional output piecewise-linear calibration.\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "KjrzziMFKuCB"
+   },
+   "source": [
+    "### Random Lattice Ensemble\n",
+    "\n",
+    "The following model config uses a random subset of features for each lattice."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "YBSS7dLjKExq"
+   },
+   "outputs": [],
+   "source": [
+    "# This is a random lattice ensemble model with separate calibration:\n",
+    "# model output is the average output of separately calibrated lattices.\n",
+    "model_config = tfl.configs.CalibratedLatticeEnsembleConfig(\n",
+    "    feature_configs=feature_configs,\n",
+    "    num_lattices=5,\n",
+    "    lattice_rank=3)\n",
+    "# A CannedClassifier is constructed from the given model config.\n",
+    "estimator = tfl.estimators.CannedClassifier(\n",
+    "    feature_columns=feature_columns,\n",
+    "    model_config=model_config,\n",
+    "    feature_analysis_input_fn=feature_analysis_input_fn,\n",
+    "    optimizer=tf.keras.optimizers.Adam(LEARNING_RATE),\n",
+    "    config=tf.estimator.RunConfig(tf_random_seed=42))\n",
+    "estimator.train(input_fn=train_input_fn)\n",
+    "results = estimator.evaluate(input_fn=test_input_fn)\n",
+    "print('Random ensemble test AUC: {}'.format(results['auc']))\n",
+    "saved_model_path = estimator.export_saved_model(estimator.model_dir,\n",
+    "                                                serving_input_fn)\n",
+    "model_graph = tfl.estimators.get_model_graph(saved_model_path)\n",
+    "tfl.visualization.draw_model_graph(model_graph, calibrator_dpi=15)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "LSXEaYAULRvf"
+   },
+   "source": [
+    "### Crystals Lattice Ensemble\n",
+    "\n",
+    "TFL also provides a heuristic feature arrangement algorithm, called *Crystals*. The Crystals algorithm first trains a *prefitting model* that estimates pairwise feature interactions. It then arranges the final ensemble such that features with more non-linear interactions are in the same lattices.\n",
+    "\n",
+    "For Crystals models, you will also need to provide a `prefitting_input_fn` that is used to train the prefitting model, as described above. The prefitting model does not need to be fully trained, so a few epochs should be enough.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "FjQKh9saMaFu"
+   },
+   "outputs": [],
+   "source": [
+    "prefitting_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(\n",
+    "    x=train_x,\n",
+    "    y=train_y,\n",
+    "    shuffle=False,\n",
+    "    batch_size=BATCH_SIZE,\n",
+    "    num_epochs=PREFITTING_NUM_EPOCHS,\n",
+    "    num_threads=1)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "fVnZpwX8MtPi"
+   },
+   "source": [
+    "You can then create a Crystals model by setting `lattices='crystals'` in the model config."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "f4awRMDe-eMv"
+   },
+   "outputs": [],
+   "source": [
+    "# This is a Crystals ensemble model with separate calibration: model output\n",
+    "# is the average output of separately calibrated lattices.\n",
+    "model_config = tfl.configs.CalibratedLatticeEnsembleConfig(\n",
+    "    feature_configs=feature_configs,\n",
+    "    lattices='crystals',\n",
+    "    num_lattices=5,\n",
+    "    lattice_rank=3)\n",
+    "# A CannedClassifier is constructed from the given model config.\n",
+    "estimator = tfl.estimators.CannedClassifier(\n",
+    "    feature_columns=feature_columns,\n",
+    "    model_config=model_config,\n",
+    "    feature_analysis_input_fn=feature_analysis_input_fn,\n",
+    "    # prefitting_input_fn is required to train the prefitting model.\n",
+    "    prefitting_input_fn=prefitting_input_fn,\n",
+    "    optimizer=tf.keras.optimizers.Adam(LEARNING_RATE),\n",
+    "    prefitting_optimizer=tf.keras.optimizers.Adam(LEARNING_RATE),\n",
+    "    config=tf.estimator.RunConfig(tf_random_seed=42))\n",
+    "estimator.train(input_fn=train_input_fn)\n",
+    "results = estimator.evaluate(input_fn=test_input_fn)\n",
+    "print('Crystals ensemble test AUC: {}'.format(results['auc']))\n",
+    "saved_model_path = estimator.export_saved_model(estimator.model_dir,\n",
+    "                                                serving_input_fn)\n",
+    "model_graph = tfl.estimators.get_model_graph(saved_model_path)\n",
+    "tfl.visualization.draw_model_graph(model_graph, calibrator_dpi=15)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "Isb2vyLAVBM1"
+   },
+   "source": [
+    "You can plot feature calibrators in more detail using the `tfl.visualization` module."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "DJPaREuWS2sg"
+   },
+   "outputs": [],
+   "source": [
+    "_ = tfl.visualization.plot_feature_calibrator(model_graph, \"age\")\n",
+    "_ = tfl.visualization.plot_feature_calibrator(model_graph, \"restecg\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "colab": {
+   "collapsed_sections": [],
+   "name": "tfl_canned_estimators.ipynb",
+   "private_outputs": true,
+   "provenance": [
+    {
+     "file_id": "1gw3igUWesgUCASoPM-xRZk6bGg3E1qOX",
+     "timestamp": 1579554854035
+    }
+   ],
+   "toc_visible": true
+  },
+  "kernelspec": {
+   "display_name": "Python 3",
+   "name": "python3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/docs/tutorials/custom_estimators.ipynb b/docs/tutorials/custom_estimators.ipynb
new file mode 100644
index 0000000..3850d5a
--- /dev/null
+++ b/docs/tutorials/custom_estimators.ipynb
@@ -0,0 +1,443 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "7765UFHoyGx6"
+   },
+   "source": [
+    "##### Copyright 2020 The TensorFlow Authors."
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "form", + "colab": {}, + "colab_type": "code", + "id": "KsOkK8O69PyT" + }, + "outputs": [], + "source": [ + "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "ZS8z-_KeywY9" + }, + "source": [ + "# TF Lattice Custom Estimators" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "r61fkA2i9Y3_" + }, + "source": [ + "\u003ctable class=\"tfo-notebook-buttons\" align=\"left\"\u003e\n", + " \u003ctd\u003e\n", + " \u003ca target=\"_blank\" href=\"https://www.tensorflow.org/lattice/tutorials/custom_estimators\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" /\u003eView on TensorFlow.org\u003c/a\u003e\n", + " \u003c/td\u003e\n", + " \u003ctd\u003e\n", + " \u003ca target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/lattice/blob/master/docs/tutorials/custom_estimators.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /\u003eRun in Google Colab\u003c/a\u003e\n", + " \u003c/td\u003e\n", + " \u003ctd\u003e\n", + " \u003ca target=\"_blank\" href=\"https://github.com/tensorflow/lattice/blob/master/docs/tutorials/custom_estimators.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /\u003eView source on GitHub\u003c/a\u003e\n", + " \u003c/td\u003e\n", + " \u003ctd\u003e\n", + " \u003ca href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/lattice/tutorials/custom_estimators.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/download_logo_32px.png\" /\u003eDownload notebook\u003c/a\u003e\n", + " \u003c/td\u003e\n", + "\u003c/table\u003e" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Ur6yCw7YVvr8" + }, + "source": [ + "## Overview\n", + "\n", + "You can use custom estimators to create arbitrarily monotonic models using TFL layers. This guide outlines the steps needed to create such estimators." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "x769lI12IZXB" + }, + "source": [ + "## Setup" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "fbBVAR6UeRN5" + }, + "source": [ + "Installing TF Lattice package:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "bpXjJKpSd3j4" + }, + "outputs": [], + "source": [ + "#@test {\"skip\": true}\n", + "!pip install tensorflow-lattice" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "jSVl9SHTeSGX" + }, + "source": [ + "Importing required packages:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "both", + "colab": {}, + "colab_type": "code", + "id": "P9rMpg1-ASY3" + }, + "outputs": [], + "source": [ + "from __future__ import absolute_import, division, print_function, unicode_literals\n", + "!pip install tensorflow-lattice\n", + "\n", + "try:\n", + " # %tensorflow_version only exists in Colab.\n", + " %tensorflow_version 2.x\n", + "except Exception:\n", + " pass\n", + "import tensorflow as tf\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "import tensorflow_lattice as tfl\n", + "from tensorflow import feature_column as fc\n", + "\n", + "from tensorflow_estimator.python.estimator.canned import optimizers\n", + "from tensorflow_estimator.python.estimator.head import binary_class_head" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "svPuM6QNxlrH" + }, + "source": [ + "Downloading the UCI Statlog (Heart) dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "both", + "colab": {}, + "colab_type": "code", + "id": "M0CmH1gPASZF" + }, + "outputs": [], + "source": [ + "csv_file = tf.keras.utils.get_file(\n", + " 'heart.csv', 'http://storage.googleapis.com/applied-dl/heart.csv')\n", + "df = pd.read_csv(csv_file)\n", + "target = df.pop('target')\n", + "train_size = int(len(df) * 0.8)\n", + "train_x = df[:train_size]\n", + "train_y = target[:train_size]\n", + "test_x = df[train_size:]\n", + "test_y = target[train_size:]\n", + "df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "nKkAw12SxvGG" + }, + "source": [ + "Setting the default values used for training in this guide:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "both", + "colab": {}, + "colab_type": "code", + "id": "1T6GFI9F6mcG" + }, + "outputs": [], + "source": [ + "LEARNING_RATE = 0.1\n", + "BATCH_SIZE = 128\n", + "NUM_EPOCHS = 1000" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "0TGfzhPHzpix" + }, + "source": [ + "## Feature Columns\n", + "\n", + "As for any other TF estimator, data needs to be passed to the estimator, which is typically via an input_fn and parsed using [FeatureColumns](https://www.tensorflow.org/guide/feature_columns)." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "DCIUz8apzs0l"
+   },
+   "outputs": [],
+   "source": [
+    "# Feature columns.\n",
+    "# - age\n",
+    "# - sex\n",
+    "# - ca        number of major vessels (0-3) colored by fluoroscopy\n",
+    "# - thal      3 = normal; 6 = fixed defect; 7 = reversible defect\n",
+    "feature_columns = [\n",
+    "    fc.numeric_column('age', default_value=-1),\n",
+    "    fc.categorical_column_with_vocabulary_list('sex', [0, 1]),\n",
+    "    fc.numeric_column('ca'),\n",
+    "    fc.categorical_column_with_vocabulary_list(\n",
+    "        'thal', ['normal', 'fixed', 'reversible']),\n",
+    "]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "hEZstmtT2CA3"
+   },
+   "source": [
+    "Note that categorical features do not need to be wrapped by a dense feature column, since the `tfl.layers.CategoricalCalibration` layer can directly consume category indices."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "H_LoW_9m5OFL"
+   },
+   "source": [
+    "## Creating input_fn\n",
+    "\n",
+    "As for any other estimator, you can use an input_fn to feed data to the model for training and evaluation."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "lFVy1Efy5NKD"
+   },
+   "outputs": [],
+   "source": [
+    "train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(\n",
+    "    x=train_x,\n",
+    "    y=train_y,\n",
+    "    shuffle=True,\n",
+    "    batch_size=BATCH_SIZE,\n",
+    "    num_epochs=NUM_EPOCHS,\n",
+    "    num_threads=1)\n",
+    "\n",
+    "test_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(\n",
+    "    x=test_x,\n",
+    "    y=test_y,\n",
+    "    shuffle=False,\n",
+    "    batch_size=BATCH_SIZE,\n",
+    "    num_epochs=1,\n",
+    "    num_threads=1)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "kbrgSr9KaRg0"
+   },
+   "source": [
+    "## Creating model_fn\n",
+    "\n",
+    "There are several ways to create a custom estimator. Here we will construct a `model_fn` that calls a Keras model on the parsed input tensors. To parse the input features, you can use `tf.feature_column.input_layer`, `tf.keras.layers.DenseFeatures`, or `tfl.estimators.transform_features`. If you use the latter, you will not need to wrap categorical features with dense feature columns, and the resulting tensors will not be concatenated, which makes it easier to use the features in the calibration layers.\n",
+    "\n",
+    "To construct a model, you can mix and match TFL layers or any other Keras layers. Here we create a calibrated lattice Keras model out of TFL layers and impose several monotonicity constraints. We then use the Keras model to create the custom estimator.\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "n2Zrv6OPaQO2"
+   },
+   "outputs": [],
+   "source": [
+    "def model_fn(features, labels, mode, config):\n",
+    "  \"\"\"model_fn for the custom estimator.\"\"\"\n",
+    "  del config\n",
+    "  input_tensors = tfl.estimators.transform_features(features, feature_columns)\n",
+    "  inputs = {\n",
+    "      key: tf.keras.layers.Input(shape=(1,), name=key) for key in input_tensors\n",
+    "  }\n",
+    "\n",
+    "  lattice_sizes = [3, 2, 2, 2]\n",
+    "  lattice_monotonicities = ['increasing', 'none', 'increasing', 'increasing']\n",
+    "  lattice_input = tf.keras.layers.Concatenate(axis=1)([\n",
+    "      tfl.layers.PWLCalibration(\n",
+    "          input_keypoints=np.linspace(10, 100, num=8, dtype=np.float32),\n",
+    "          # The output range of the calibrator should be the input range of\n",
+    "          # the following lattice dimension.\n",
+    "          output_min=0.0,\n",
+    "          output_max=lattice_sizes[0] - 1.0,\n",
+    "          monotonicity='increasing',\n",
+    "      )(inputs['age']),\n",
+    "      tfl.layers.CategoricalCalibration(\n",
+    "          # Number of categories including any missing/default category.\n",
+    "          num_buckets=2,\n",
+    "          output_min=0.0,\n",
+    "          output_max=lattice_sizes[1] - 1.0,\n",
+    "      )(inputs['sex']),\n",
+    "      tfl.layers.PWLCalibration(\n",
+    "          input_keypoints=[0.0, 1.0, 2.0, 3.0],\n",
+    "          output_min=0.0,\n",
+    "          output_max=lattice_sizes[2] - 1.0,\n",
+    "          # You can specify TFL regularizers as tuple\n",
+    "          # ('regularizer name', l1, l2).\n",
+    "          kernel_regularizer=('hessian', 0.0, 1e-4),\n",
+    "          monotonicity='increasing',\n",
+    "      )(inputs['ca']),\n",
+    "      tfl.layers.CategoricalCalibration(\n",
+    "          num_buckets=3,\n",
+    "          output_min=0.0,\n",
+    "          output_max=lattice_sizes[3] - 1.0,\n",
+    "          # Categorical monotonicity can be partial order.\n",
+    "          # (i, j) indicates that we must have output(i) \u003c= output(j).\n",
+    "          # Make sure to set the lattice monotonicity to 'increasing' for this\n",
+    "          # dimension.\n",
+    "          monotonicities=[(0, 1), (0, 2)],\n",
+    "      )(inputs['thal']),\n",
+    "  ])\n",
+    "  output = tfl.layers.Lattice(\n",
+    "      lattice_sizes=lattice_sizes, monotonicities=lattice_monotonicities)(\n",
+    "          lattice_input)\n",
+    "\n",
+    "  training = (mode == tf.estimator.ModeKeys.TRAIN)\n",
+    "  model = tf.keras.Model(inputs=inputs, outputs=output)\n",
+    "  logits = model(input_tensors, training=training)\n",
+    "\n",
+    "  if training:\n",
+    "    optimizer = optimizers.get_optimizer_instance_v2('Adagrad', LEARNING_RATE)\n",
+    "  else:\n",
+    "    optimizer = None\n",
+    "\n",
+    "  head = binary_class_head.BinaryClassHead()\n",
+    "  return head.create_estimator_spec(\n",
+    "      features=features,\n",
+    "      mode=mode,\n",
+    "      labels=labels,\n",
+    "      optimizer=optimizer,\n",
+    "      logits=logits,\n",
+    "      trainable_variables=model.trainable_variables,\n",
+    "      update_ops=model.updates)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "mng-VtsSbVtQ"
+   },
+   "source": [
+    "## Training and Estimator\n",
+    "\n",
+    "Using the `model_fn` we can create and train the estimator."
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "j38GaEbKbZju" + }, + "outputs": [], + "source": [ + "estimator = tf.estimator.Estimator(model_fn=model_fn)\n", + "estimator.train(input_fn=train_input_fn)\n", + "results = estimator.evaluate(input_fn=test_input_fn)\n", + "print('AUC: {}'.format(results['auc']))" + ] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "name": "tfl_custom_estimators.ipynb", + "private_outputs": true, + "provenance": [ + { + "file_id": "1YQhpyfKAW4Gz49gDFMJtVSpAM-Zi12h9", + "timestamp": 1579559437099 + } + ], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/docs/tutorials/keras_layers.ipynb b/docs/tutorials/keras_layers.ipynb new file mode 100644 index 0000000..6d96515 --- /dev/null +++ b/docs/tutorials/keras_layers.ipynb @@ -0,0 +1,838 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "7765UFHoyGx6" + }, + "source": [ + "##### Copyright 2020 The TensorFlow Authors." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "form", + "colab": {}, + "colab_type": "code", + "id": "KsOkK8O69PyT" + }, + "outputs": [], + "source": [ + "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." 
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "ZS8z-_KeywY9"
+   },
+   "source": [
+    "# Creating Keras Models with TFL Layers"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "r61fkA2i9Y3_"
+   },
+   "source": [
+    "\u003ctable class=\"tfo-notebook-buttons\" align=\"left\"\u003e\n",
+    "  \u003ctd\u003e\n",
+    "    \u003ca target=\"_blank\" href=\"https://www.tensorflow.org/lattice/tutorials/keras_layers\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" /\u003eView on TensorFlow.org\u003c/a\u003e\n",
+    "  \u003c/td\u003e\n",
+    "  \u003ctd\u003e\n",
+    "    \u003ca target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/lattice/blob/master/docs/tutorials/keras_layers.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /\u003eRun in Google Colab\u003c/a\u003e\n",
+    "  \u003c/td\u003e\n",
+    "  \u003ctd\u003e\n",
+    "    \u003ca target=\"_blank\" href=\"https://github.com/tensorflow/lattice/blob/master/docs/tutorials/keras_layers.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /\u003eView source on GitHub\u003c/a\u003e\n",
+    "  \u003c/td\u003e\n",
+    "  \u003ctd\u003e\n",
+    "    \u003ca href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/lattice/tutorials/keras_layers.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/download_logo_32px.png\" /\u003eDownload notebook\u003c/a\u003e\n",
+    "  \u003c/td\u003e\n",
+    "\u003c/table\u003e"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "ecLbJCvJSSCd"
+   },
+   "source": [
+    "## Overview\n",
+    "\n",
+    "You can use TFL Keras layers to construct Keras models with monotonicity and other shape constraints. This example builds and trains a calibrated lattice model for the UCI heart dataset using TFL layers.\n",
+    "\n",
+    "In a calibrated lattice model, each feature is transformed by a `tfl.layers.PWLCalibration` or a `tfl.layers.CategoricalCalibration` layer and the results are nonlinearly fused using a `tfl.layers.Lattice`."
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "x769lI12IZXB" + }, + "source": [ + "## Setup" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "fbBVAR6UeRN5" + }, + "source": [ + "Installing TF Lattice package:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "bpXjJKpSd3j4" + }, + "outputs": [], + "source": [ + "#@test {\"skip\": true}\n", + "!pip install tensorflow-lattice" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "jSVl9SHTeSGX" + }, + "source": [ + "Importing required packages:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "both", + "colab": {}, + "colab_type": "code", + "id": "pm0LD8iyIZXF" + }, + "outputs": [], + "source": [ + "from __future__ import absolute_import, division, print_function, unicode_literals\n", + "\n", + "try:\n", + " # %tensorflow_version only exists in Colab.\n", + " %tensorflow_version 2.x\n", + "except Exception:\n", + " pass\n", + "import tensorflow as tf\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "import tensorflow_lattice as tfl\n", + "from tensorflow import feature_column as fc" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "svPuM6QNxlrH" + }, + "source": [ + "Downloading the UCI Statlog (Heart) dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "both", + "colab": {}, + "colab_type": "code", + "id": "PG3pFtK-IZXM" + }, + "outputs": [], + "source": [ + "# UCI Statlog (Heart) dataset.\n", + "csv_file = tf.keras.utils.get_file(\n", + " 'heart.csv', 'http://storage.googleapis.com/applied-dl/heart.csv')\n", + "training_data_df = pd.read_csv(csv_file).sample(\n", + " frac=1.0, random_state=41).reset_index(drop=True)\n", + "training_data_df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "nKkAw12SxvGG" + }, + "source": [ + "Setting the default values used for training in this guide:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "both", + "colab": {}, + "colab_type": "code", + "id": "krAJBE-yIZXR" + }, + "outputs": [], + "source": [ + "LEARNING_RATE = 0.1\n", + "BATCH_SIZE = 128\n", + "NUM_EPOCHS = 100" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "0TGfzhPHzpix" + }, + "source": [ + "## Sequential Keras Model\n", + "\n", + "This example creates a Sequential Keras model and only uses TFL layers.\n", + "\n", + "Lattice layers expect `input[i]` to be within `[0, lattice_sizes[i] - 1.0]`, so we need to define the lattice sizes ahead of the calibration layers so we can properly specify output range of the calibration layers.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "nOQWqPAbQS3o" + }, + "outputs": [], + "source": [ + "# Lattice layer expects input[i] to be within [0, lattice_sizes[i] - 1.0], so\n", + "lattice_sizes = [3, 2, 2, 2, 2, 2, 2]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "W3DnEKWvQYXm" + }, + "source": [ + "We use a `tfl.layers.ParallelCombination` layer to group together calibration layers which have to be executed in paralel in order to be able to create a Sequential model.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + 
"metadata": { + "colab": {}, + "colab_type": "code", + "id": "o_hyk5GkQfl8" + }, + "outputs": [], + "source": [ + "combined_calibrators = tfl.layers.ParallelCombination()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "BPZsSUZiQiwc" + }, + "source": [ + "We create a calibration layer for each feature and add it to the parallel combination layer. For numeric features we use `tfl.layers.PWLCalibration` and for categorical features we use `tfl.layers.CategoricalCalibration`." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "DXPc6rSGxzFZ" + }, + "outputs": [], + "source": [ + "# ############### age ###############\n", + "calibrator = tfl.layers.PWLCalibration(\n", + " # Every PWLCalibration layer must have keypoints of piecewise linear\n", + " # function specified. Easiest way to specify them is to uniformly cover\n", + " # entire input range by using numpy.linspace().\n", + " input_keypoints=np.linspace(\n", + " training_data_df['age'].min(), training_data_df['age'].max(), num=5),\n", + " # You need to ensure that input keypoints have same dtype as layer input.\n", + " # You can do it by setting dtype here or by providing keypoints in such\n", + " # format which will be converted to deisred tf.dtype by default.\n", + " dtype=tf.float32,\n", + " # Output range must correspond to expected lattice input range.\n", + " output_min=0.0,\n", + " output_max=lattice_sizes[0] - 1.0,\n", + ")\n", + "combined_calibrators.append(calibrator)\n", + "\n", + "# ############### sex ###############\n", + "# For boolean features simply specify CategoricalCalibration layer with 2\n", + "# buckets.\n", + "calibrator = tfl.layers.CategoricalCalibration(\n", + " num_buckets=2,\n", + " output_min=0.0,\n", + " output_max=lattice_sizes[1] - 1.0,\n", + " # Initializes all outputs to (output_min + output_max) / 2.0.\n", + " kernel_initializer='constant')\n", + "combined_calibrators.append(calibrator)\n", + "\n", + "# ############### cp ###############\n", + "calibrator = tfl.layers.PWLCalibration(\n", + " # Here instead of specifying dtype of layer we convert keypoints into\n", + " # np.float32.\n", + " input_keypoints=np.linspace(1, 4, num=4, dtype=np.float32),\n", + " output_min=0.0,\n", + " output_max=lattice_sizes[2] - 1.0,\n", + " monotonicity='increasing',\n", + " # You can specify TFL regularizers as a tuple ('regularizer name', l1, l2).\n", + " kernel_regularizer=('hessian', 0.0, 1e-4))\n", + "combined_calibrators.append(calibrator)\n", + "\n", + "# ############### trestbps ###############\n", + "calibrator = tfl.layers.PWLCalibration(\n", + " # Alternatively, you might want to use quantiles as keypoints instead of\n", + " # uniform keypoints\n", + " input_keypoints=np.quantile(training_data_df['trestbps'],\n", + " np.linspace(0.0, 1.0, num=5)),\n", + " dtype=tf.float32,\n", + " # Together with quantile keypoints you might want to initialize piecewise\n", + " # linear function to have 'equal_slopes' in order for output of layer\n", + " # after initialization to preserve original distribution.\n", + " kernel_initializer='equal_slopes',\n", + " output_min=0.0,\n", + " output_max=lattice_sizes[3] - 1.0,\n", + " # You might consider clamping extreme inputs of the calibrator to output\n", + " # bounds.\n", + " clamp_min=True,\n", + " clamp_max=True,\n", + " monotonicity='increasing')\n", + "combined_calibrators.append(calibrator)\n", + "\n", + "# ############### chol ###############\n", + "calibrator = 
tfl.layers.PWLCalibration(\n", + " # Explicit input keypoint initialization.\n", + " input_keypoints=[126.0, 210.0, 247.0, 286.0, 564.0],\n", + " dtype=tf.float32,\n", + " output_min=0.0,\n", + " output_max=lattice_sizes[4] - 1.0,\n", + " # Monotonicity of calibrator can be decreasing. Note that corresponding\n", + " # lattice dimension must have INCREASING monotonicity regardless of\n", + " # monotonicity direction of calibrator.\n", + " monotonicity='decreasing',\n", + " # Convexity together with decreasing monotonicity result in diminishing\n", + " # return constraint.\n", + " convexity='convex',\n", + " # You can specify list of regularizers. You are not limited to TFL\n", + " # regularizrs. Feel free to use any :)\n", + " kernel_regularizer=[('laplacian', 0.0, 1e-4),\n", + " tf.keras.regularizers.l1_l2(l1=0.001)])\n", + "combined_calibrators.append(calibrator)\n", + "\n", + "# ############### fbs ###############\n", + "calibrator = tfl.layers.CategoricalCalibration(\n", + " num_buckets=2,\n", + " output_min=0.0,\n", + " output_max=lattice_sizes[5] - 1.0,\n", + " # For categorical calibration layer monotonicity is specified for pairs\n", + " # of indices of categories. Output for first category in pair will be\n", + " # smaller than output for second category.\n", + " #\n", + " # Don't forget to set monotonicity of corresponding dimension of Lattice\n", + " # layer to '1'.\n", + " monotonicities=[(0, 1)],\n", + " # This initializer is identical to default one('uniform'), but has fixed\n", + " # seed in order to simplify experimentation.\n", + " kernel_initializer=tf.keras.initializers.RandomUniform(\n", + " minval=0.0, maxval=lattice_sizes[5] - 1.0, seed=1))\n", + "combined_calibrators.append(calibrator)\n", + "\n", + "# ############### restecg ###############\n", + "calibrator = tfl.layers.CategoricalCalibration(\n", + " num_buckets=3,\n", + " output_min=0.0,\n", + " output_max=lattice_sizes[6] - 1.0,\n", + " # Categorical monotonicity can be partial order.\n", + " monotonicities=[(0, 1), (0, 2)],\n", + " # Categorical calibration layer supports standard Keras regularizers.\n", + " kernel_regularizer=tf.keras.regularizers.l1_l2(l1=0.001),\n", + " kernel_initializer='constant')\n", + "combined_calibrators.append(calibrator)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "inyNlSBeQyp7" + }, + "source": [ + "We then create a lattice layer to nonlinearly fuse the outputs of the calibrators.\n", + "\n", + "Note that we need to specify the monotonicity of the lattice to be increasing for required dimensions. The composition with the direction of the monotonicity in the calibration will result in the correct end-to-end direction of monotonicity. This includes partial monotonicity of CategoricalCalibration layer." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "DNCc9oBTRo6w" + }, + "outputs": [], + "source": [ + "lattice = tfl.layers.Lattice(\n", + " lattice_sizes=lattice_sizes,\n", + " monotonicities=[\n", + " 'increasing', 'none', 'increasing', 'increasing', 'increasing',\n", + " 'increasing', 'increasing'\n", + " ],\n", + " output_min=0.0,\n", + " output_max=1.0)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "T5q2InayRpDr" + }, + "source": [ + "We can then create a sequential model using the combined calibrators and lattice layers." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "xX6lroYZQy3L" + }, + "outputs": [], + "source": [ + "model = tf.keras.models.Sequential()\n", + "model.add(combined_calibrators)\n", + "model.add(lattice)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "W3UFxD3fRzIC" + }, + "source": [ + "Training works the same as any other keras model." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "2jz4JvI-RzSj" + }, + "outputs": [], + "source": [ + "features = training_data_df[[\n", + " 'age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg'\n", + "]].values.astype(np.float32)\n", + "target = training_data_df[['target']].values.astype(np.float32)\n", + "\n", + "model.compile(\n", + " loss=tf.keras.losses.mean_squared_error,\n", + " optimizer=tf.keras.optimizers.Adagrad(learning_rate=LEARNING_RATE))\n", + "model.fit(\n", + " features,\n", + " target,\n", + " batch_size=BATCH_SIZE,\n", + " epochs=NUM_EPOCHS,\n", + " validation_split=0.2,\n", + " shuffle=False,\n", + " verbose=0)\n", + "\n", + "model.evaluate(features, target)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "RTHoW_5lxwT5" + }, + "source": [ + "## Functional Keras Model\n", + "\n", + "This example uses a functional API for Keras model construction.\n", + "\n", + "As mentioned in the previous section, lattice layers expect `input[i]` to be within `[0, lattice_sizes[i] - 1.0]`, so we need to define the lattice sizes ahead of the calibration layers so we can properly specify output range of the calibration layers." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "gJjUYvBuW1qE" + }, + "outputs": [], + "source": [ + "# We are going to have 2-d embedding as one of lattice inputs.\n", + "lattice_sizes = [3, 2, 2, 3, 3, 2, 2]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Z03qY5MYW1yT" + }, + "source": [ + "For each feature, we need to create an input layer followed by a calibration layer. For numeric features we use `tfl.layers.PWLCalibration` and for categorical features we use `tfl.layers.CategoricalCalibration`." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "DCIUz8apzs0l" + }, + "outputs": [], + "source": [ + "model_inputs = []\n", + "lattice_inputs = []\n", + "# ############### age ###############\n", + "age_input = tf.keras.layers.Input(shape=[1], name='age')\n", + "model_inputs.append(age_input)\n", + "age_calibrator = tfl.layers.PWLCalibration(\n", + " # Every PWLCalibration layer must have keypoints of piecewise linear\n", + " # function specified. 
The easiest way to specify them is to uniformly cover the\n", + " # entire input range by using numpy.linspace().\n", + " input_keypoints=np.linspace(\n", + " training_data_df['age'].min(), training_data_df['age'].max(), num=5),\n", + " # You need to ensure that input keypoints have the same dtype as the\n", + " # layer input. You can do that by setting the dtype here or by providing\n", + " # keypoints in a format which will be converted to the desired tf.dtype\n", + " # by default.\n", + " dtype=tf.float32,\n", + " # Output range must correspond to expected lattice input range.\n", + " output_min=0.0,\n", + " output_max=lattice_sizes[0] - 1.0,\n", + " monotonicity='increasing',\n", + " name='age_calib',\n", + ")(\n", + " age_input)\n", + "lattice_inputs.append(age_calibrator)\n", + "\n", + "# ############### sex ###############\n", + "# For boolean features, simply use a CategoricalCalibration layer with 2\n", + "# buckets.\n", + "sex_input = tf.keras.layers.Input(shape=[1], name='sex')\n", + "model_inputs.append(sex_input)\n", + "sex_calibrator = tfl.layers.CategoricalCalibration(\n", + " num_buckets=2,\n", + " output_min=0.0,\n", + " output_max=lattice_sizes[1] - 1.0,\n", + " # Initializes all outputs to (output_min + output_max) / 2.0.\n", + " kernel_initializer='constant',\n", + " name='sex_calib',\n", + ")(\n", + " sex_input)\n", + "lattice_inputs.append(sex_calibrator)\n", + "\n", + "# ############### cp ###############\n", + "cp_input = tf.keras.layers.Input(shape=[1], name='cp')\n", + "model_inputs.append(cp_input)\n", + "cp_calibrator = tfl.layers.PWLCalibration(\n", + " # Here, instead of specifying the dtype of the layer, we convert the\n", + " # keypoints to np.float32.\n", + " input_keypoints=np.linspace(1, 4, num=4, dtype=np.float32),\n", + " output_min=0.0,\n", + " output_max=lattice_sizes[2] - 1.0,\n", + " monotonicity='increasing',\n", + " # You can specify TFL regularizers as a tuple ('regularizer name', l1, l2).\n", + " kernel_regularizer=('hessian', 0.0, 1e-4),\n", + " name='cp_calib',\n", + ")(\n", + " cp_input)\n", + "lattice_inputs.append(cp_calibrator)\n", + "\n", + "# ############### trestbps ###############\n", + "trestbps_input = tf.keras.layers.Input(shape=[1], name='trestbps')\n", + "model_inputs.append(trestbps_input)\n", + "trestbps_calibrator = tfl.layers.PWLCalibration(\n", + " # Alternatively, you might want to use quantiles as keypoints instead of\n", + " # uniform keypoints.\n", + " input_keypoints=np.quantile(training_data_df['trestbps'],\n", + " np.linspace(0.0, 1.0, num=5)),\n", + " dtype=tf.float32,\n", + " # Together with quantile keypoints, you might want to initialize the\n", + " # piecewise-linear function with 'equal_slopes' so that the output of the\n", + " # layer after initialization preserves the original distribution.\n", + " kernel_initializer='equal_slopes',\n", + " output_min=0.0,\n", + " output_max=lattice_sizes[3] - 1.0,\n", + " # You might consider clamping extreme inputs of the calibrator to the\n", + " # output bounds.\n", + " clamp_min=True,\n", + " clamp_max=True,\n", + " monotonicity='increasing',\n", + " name='trestbps_calib',\n", + ")(\n", + " trestbps_input)\n", + "lattice_inputs.append(trestbps_calibrator)\n", + "\n", + "# ############### chol ###############\n", + "chol_input = tf.keras.layers.Input(shape=[1], name='chol')\n", + "model_inputs.append(chol_input)\n", + "chol_calibrator = tfl.layers.PWLCalibration(\n", + " # Explicit input keypoint initialization.\n", + " input_keypoints=[126.0, 210.0, 247.0, 286.0, 564.0],\n", + " output_min=0.0,\n", + " output_max=lattice_sizes[4] - 1.0,\n", + " # The monotonicity of a calibrator can be 'decreasing'. Note that the\n", + " # corresponding lattice dimension must have INCREASING monotonicity\n", + " # regardless of the monotonicity direction of the calibrator.\n", + " monotonicity='decreasing',\n", + " # Convexity together with decreasing monotonicity results in a\n", + " # diminishing returns constraint.\n", + " convexity='convex',\n", + " # You can specify a list of regularizers. You are not limited to TFL\n", + " # regularizers; feel free to use any Keras regularizer.\n", + " kernel_regularizer=[('laplacian', 0.0, 1e-4),\n", + " tf.keras.regularizers.l1_l2(l1=0.001)],\n", + " name='chol_calib',\n", + ")(\n", + " chol_input)\n", + "lattice_inputs.append(chol_calibrator)\n", + "\n", + "# ############### fbs ###############\n", + "fbs_input = tf.keras.layers.Input(shape=[1], name='fbs')\n", + "model_inputs.append(fbs_input)\n", + "fbs_calibrator = tfl.layers.CategoricalCalibration(\n", + " num_buckets=2,\n", + " output_min=0.0,\n", + " output_max=lattice_sizes[5] - 1.0,\n", + " # For the categorical calibration layer, monotonicity is specified for\n", + " # pairs of category indices. The output for the first category in each\n", + " # pair will be smaller than the output for the second category.\n", + " #\n", + " # Don't forget to set the monotonicity of the corresponding Lattice\n", + " # dimension to 'increasing'.\n", + " monotonicities=[(0, 1)],\n", + " # This initializer is identical to the default one ('uniform'), but has a\n", + " # fixed seed in order to simplify experimentation.\n", + " kernel_initializer=tf.keras.initializers.RandomUniform(\n", + " minval=0.0, maxval=lattice_sizes[5] - 1.0, seed=1),\n", + " name='fbs_calib',\n", + ")(\n", + " fbs_input)\n", + "lattice_inputs.append(fbs_calibrator)\n", + "\n", + "# ############### restecg ###############\n", + "restecg_input = tf.keras.layers.Input(shape=[1], name='restecg')\n", + "model_inputs.append(restecg_input)\n", + "restecg_calibrator = tfl.layers.CategoricalCalibration(\n", + " num_buckets=3,\n", + " output_min=0.0,\n", + " output_max=lattice_sizes[6] - 1.0,\n", + " # Categorical monotonicity can be a partial order.\n", + " monotonicities=[(0, 1), (0, 2)],\n", + " # The categorical calibration layer supports standard Keras regularizers.\n", + " kernel_regularizer=tf.keras.regularizers.l1_l2(l1=0.001),\n", + " kernel_initializer='constant',\n", + " name='restecg_calib',\n", + ")(\n", + " restecg_input)\n", + "lattice_inputs.append(restecg_calibrator)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Fr0k8La_YgQG" + }, + "source": [ + "We then create a lattice layer to nonlinearly fuse the outputs of the calibrators.\n", + "\n", + "Note that we need to specify the monotonicity of the lattice to be increasing for the required dimensions. Composing this with the direction of the monotonicity in the calibration results in the correct end-to-end direction of monotonicity. This includes the partial monotonicity of the `tfl.layers.CategoricalCalibration` layer.\n",
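+ "\n", + "For example, the `chol` calibrator above is *decreasing* while its lattice dimension below is `'increasing'`, so the end-to-end model output is monotonically decreasing in `chol`."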
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "X15RE0NybNbU" + }, + "outputs": [], + "source": [ + "lattice = tfl.layers.Lattice(\n", + " lattice_sizes=lattice_sizes,\n", + " monotonicities=[\n", + " 'increasing', 'none', 'increasing', 'increasing', 'increasing',\n", + " 'increasing', 'increasing'\n", + " ],\n", + " output_min=0.0,\n", + " output_max=1.0,\n", + " name='lattice',\n", + ")(\n", + " lattice_inputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "31VzsnMCA9dh" + }, + "source": [ + "To add more flexibility to the model, we add an output calibration layer." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "efCP3Yx2A9n7" + }, + "outputs": [], + "source": [ + "model_output = tfl.layers.PWLCalibration(\n", + " input_keypoints=np.linspace(0.0, 1.0, 5),\n", + " name='output_calib',\n", + ")(\n", + " lattice)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "1SURnNl8bNgw" + }, + "source": [ + "We can now create a model using the inputs and outputs." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "7gY-VXuYbZLa" + }, + "outputs": [], + "source": [ + "model = tf.keras.models.Model(\n", + " inputs=model_inputs,\n", + " outputs=model_output)\n", + "tf.keras.utils.plot_model(model, rankdir='LR')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "tvFJTs94bZXK" + }, + "source": [ + "Training works the same as for any other Keras model. Note that, with our setup, input features are passed as separate tensors." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "vMQTGbFAYgYS" + }, + "outputs": [], + "source": [ + "feature_names = ['age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg']\n", + "features = np.split(\n", + " training_data_df[feature_names].values.astype(np.float32),\n", + " indices_or_sections=len(feature_names),\n", + " axis=1)\n", + "target = training_data_df[['target']].values.astype(np.float32)\n", + "\n", + "model.compile(\n", + " loss=tf.keras.losses.mean_squared_error,\n", + " optimizer=tf.keras.optimizers.Adagrad(LEARNING_RATE))\n", + "model.fit(\n", + " features,\n", + " target,\n", + " batch_size=BATCH_SIZE,\n", + " epochs=NUM_EPOCHS,\n", + " validation_split=0.2,\n", + " shuffle=False,\n", + " verbose=0)\n", + "\n", + "model.evaluate(features, target)" + ] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "name": "tfl_keras_layers.ipynb", + "private_outputs": true, + "provenance": [ + { + "file_id": "1ov3qXThgltj77os4ULx7nI63f3oM0vsc", + "timestamp": 1579561232062 + }, + { + "file_id": "1YQhpyfKAW4Gz49gDFMJtVSpAM-Zi12h9", + "timestamp": 1579117071304 + } + ], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/docs/tutorials/shape_constraints.ipynb b/docs/tutorials/shape_constraints.ipynb new file mode 100644 index 0000000..0065400 --- /dev/null +++ b/docs/tutorials/shape_constraints.ipynb @@ -0,0 +1,1262 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "7765UFHoyGx6" + }, + "source": [ + "##### Copyright 2020 The TensorFlow Authors."
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "form", + "colab": {}, + "colab_type": "code", + "id": "KsOkK8O69PyT" + }, + "outputs": [], + "source": [ + "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "RKQpW0JqQQmY" + }, + "source": [ + "# Shape Constraints with TensorFlow Lattice\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "r61fkA2i9Y3_" + }, + "source": [ + "\u003ctable class=\"tfo-notebook-buttons\" align=\"left\"\u003e\n", + " \u003ctd\u003e\n", + " \u003ca target=\"_blank\" href=\"https://www.tensorflow.org/lattice/tutorials/shape_constraints\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" /\u003eView on TensorFlow.org\u003c/a\u003e\n", + " \u003c/td\u003e\n", + " \u003ctd\u003e\n", + " \u003ca target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/lattice/blob/master/docs/tutorials/shape_constraints.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /\u003eRun in Google Colab\u003c/a\u003e\n", + " \u003c/td\u003e\n", + " \u003ctd\u003e\n", + " \u003ca target=\"_blank\" href=\"https://github.com/tensorflow/lattice/blob/master/docs/tutorials/shape_constraints.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /\u003eView source on GitHub\u003c/a\u003e\n", + " \u003c/td\u003e\n", + " \u003ctd\u003e\n", + " \u003ca href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/lattice/tutorials/shape_constraints.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/download_logo_32px.png\" /\u003eDownload notebook\u003c/a\u003e\n", + " \u003c/td\u003e\n", + "\u003c/table\u003e" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "2plcL3iTVjsp" + }, + "source": [ + "## Overview\n", + "\n", + "In this colab we will fit a TensorFlow Lattice (TFL) canned classifier on a handcrafted restaurant review rating dataset and experiment with various TFL regularizers and shape constraints. Before proceeding, make sure your runtime has all required packages installed (as imported in the code cells below)."
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "x769lI12IZXB" + }, + "source": [ + "## Setup" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "fbBVAR6UeRN5" + }, + "source": [ + "Installing TF Lattice package:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "bpXjJKpSd3j4" + }, + "outputs": [], + "source": [ + "#@test {\"skip\": true}\n", + "!pip install tensorflow-lattice" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "jSVl9SHTeSGX" + }, + "source": [ + "Importing required packages:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "both", + "colab": {}, + "colab_type": "code", + "id": "iY6awAl058TV" + }, + "outputs": [], + "source": [ + "from __future__ import absolute_import, division, print_function, unicode_literals\n", + "\n", + "try:\n", + " # %tensorflow_version only exists in Colab.\n", + " %tensorflow_version 2.x\n", + "except Exception:\n", + " pass\n", + "import tensorflow as tf\n", + "\n", + "from IPython.core.pylabtools import figsize\n", + "import itertools\n", + "import matplotlib\n", + "from matplotlib import pyplot as plt\n", + "import numpy as np\n", + "import pandas as pd\n", + "import tensorflow_lattice as tfl" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "7TmBk_IGgJF0" + }, + "source": [ + "Default values used in this guide:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "kQHPyPsPUF92" + }, + "outputs": [], + "source": [ + "NUM_EPOCHS = 500\n", + "BATCH_SIZE = 64\n", + "LEARNING_RATE = 0.001" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "FjR7D8Ag3z0d" + }, + "source": [ + "## Training Dataset for Ranking Restaurants" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "a1YetzbdFOij" + }, + "source": [ + "Imagine a simplified scenario where we want to determine whether or not users will click on a restaurant search result. The task is to predict the clickthrough rate (CTR) given the input features:\n", + "- Average rating (`avg_rating`): a numeric (float) feature with values in the range [1,5].\n", + "- Number of reviews (`num_reviews`): a positive numeric (integer) feature with values capped at 200, used as a measure of trendiness.\n", + "- Dollar rating (`dollar_rating`): a categorical feature with string values in the set {\"D\", \"DD\", \"DDD\", \"DDDD\"}.\n", + "\n", + "Here we create a synthetic dataset where the true CTR is given by the formula:\n", + "$$\n", + "CTR = \frac{1}{1 + \exp\{\mbox{b(dollar_rating)}-\mbox{avg_rating}\times \log(\mbox{num_reviews}+1)/4 \}}, \n", + "$$\n", + "where $b(\cdot)$ translates each `dollar_rating` to a baseline value:\n", + "$$\n", + "\mbox{D}\to 3,\ \mbox{DD}\to 2,\ \mbox{DDD}\to 4,\ \mbox{DDDD}\to 4.5. \n", + "$$\n", + "(The $+1$ inside the logarithm matches the `np.log1p` call in the implementation below and keeps the exponent finite at zero reviews.)\n", + "\n", + "This formula reflects typical user patterns, e.g., with everything else fixed, \"\\$\\$\" restaurants will receive more clicks than \"\\$\", followed by \"\\$\\$\\$\" and \"\\$\\$\\$\\$\". 
" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "mKovnyv1jATw" + }, + "outputs": [], + "source": [ + "def click_through_rate(avg_ratings, num_reviews, dollar_ratings):\n", + " dollar_rating_baseline = {\"D\": 3, \"DD\": 2, \"DDD\": 4, \"DDDD\": 4.5}\n", + " return 1 / (1 + np.exp(\n", + " np.array([dollar_rating_baseline[d] for d in dollar_ratings]) -\n", + " avg_ratings * np.log1p(num_reviews) / 4))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "BPlgRdt6jAbP" + }, + "source": [ + "Let's take a look at the contour plots of this CTR function." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "KC5qX_XKmc7g" + }, + "outputs": [], + "source": [ + "def color_bar():\n", + " bar = matplotlib.cm.ScalarMappable(\n", + " norm=matplotlib.colors.Normalize(0, 1, True),\n", + " cmap=\"viridis\",\n", + " )\n", + " bar.set_array([0, 1])\n", + " return bar\n", + "\n", + "\n", + "def plot_fns(fns, split_by_dollar=False, res=25):\n", + " \"\"\"Generates contour plots for a list of (name, fn) functions.\"\"\"\n", + " num_reviews, avg_ratings = np.meshgrid(\n", + " np.linspace(0, 200, num=res),\n", + " np.linspace(1, 5, num=res),\n", + " )\n", + " if split_by_dollar:\n", + " dollar_rating_splits = [\"D\", \"DD\", \"DDD\", \"DDDD\"]\n", + " else:\n", + " dollar_rating_splits = [None]\n", + " if len(fns) == 1:\n", + " fig, axes = plt.subplots(2, 2, sharey=True, tight_layout=False)\n", + " else:\n", + " fig, axes = plt.subplots(\n", + " len(dollar_rating_splits), len(fns), sharey=True, tight_layout=False)\n", + " axes = axes.flatten()\n", + " axes_index = 0\n", + " for dollar_rating_split in dollar_rating_splits:\n", + " for title, fn in fns:\n", + " if dollar_rating_split is not None:\n", + " dollar_ratings = np.repeat(dollar_rating_split, res**2)\n", + " values = fn(avg_ratings.flatten(), num_reviews.flatten(),\n", + " dollar_ratings)\n", + " title = \"{}: dollar_rating={}\".format(title, dollar_rating_split)\n", + " else:\n", + " values = fn(avg_ratings.flatten(), num_reviews.flatten())\n", + " subplot = axes[axes_index]\n", + " axes_index += 1\n", + " subplot.contourf(\n", + " avg_ratings,\n", + " num_reviews,\n", + " np.reshape(values, (res, res)),\n", + " vmin=0,\n", + " vmax=1)\n", + " subplot.title.set_text(title)\n", + " subplot.set(xlabel=\"Average Rating\")\n", + " subplot.set(ylabel=\"Number of Reviews\")\n", + " subplot.set(xlim=(1, 5))\n", + "\n", + " _ = fig.colorbar(color_bar(), cax=fig.add_axes([0.95, 0.2, 0.01, 0.6]))\n", + "\n", + "\n", + "figsize(11, 11)\n", + "plot_fns([(\"CTR\", click_through_rate)], split_by_dollar=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Ol91olp3muNN" + }, + "source": [ + "### Preparing Data\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "H8BOshZS9xwn" + }, + "source": [ + "We start by generating a simulated dataset of restaurants and their features." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "MhqcOPdTT_wj" + }, + "outputs": [], + "source": [ + "def sample_restaurants(n):\n", + " avg_ratings = np.random.uniform(1.0, 5.0, n)\n", + " num_reviews = np.round(np.exp(np.random.uniform(0.0, np.log(200), n)))\n", + " dollar_ratings = np.random.choice([\"D\", \"DD\", \"DDD\", \"DDDD\"], n)\n", + " ctr_labels = click_through_rate(avg_ratings, num_reviews, dollar_ratings)\n", + " return avg_ratings, num_reviews, dollar_ratings, ctr_labels\n", + "\n", + "\n", + "np.random.seed(42)\n", + "avg_ratings, num_reviews, dollar_ratings, ctr_labels = sample_restaurants(2000)\n", + "\n", + "figsize(5, 5)\n", + "fig, axs = plt.subplots(1, 1, sharey=False, tight_layout=False)\n", + "for rating, marker in [(\"D\", \"o\"), (\"DD\", \"^\"), (\"DDD\", \"+\"), (\"DDDD\", \"x\")]:\n", + " plt.scatter(\n", + " x=avg_ratings[np.where(dollar_ratings == rating)],\n", + " y=num_reviews[np.where(dollar_ratings == rating)],\n", + " c=ctr_labels[np.where(dollar_ratings == rating)],\n", + " vmin=0,\n", + " vmax=1,\n", + " marker=marker,\n", + " label=rating)\n", + "plt.xlabel(\"Average Rating\")\n", + "plt.ylabel(\"Number of Reviews\")\n", + "plt.legend()\n", + "plt.xlim((1, 5))\n", + "plt.title(\"Distribution of restaurants\")\n", + "_ = fig.colorbar(color_bar(), cax=fig.add_axes([0.95, 0.2, 0.01, 0.6]))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "tRetsfLv_JSR" + }, + "source": [ + "Let's produce the training, validation and testing datasets. When a restaurant is viewed in the search results, we can record the user's engagement (click or no click) as a sample point. \n", + "\n", + "In practice, users often do not go through all search results. This means that users will likely only see restaurants already considered \"good\" by the current ranking model in use. As a result, \"good\" restaurants are more frequently impressed and thus over-represented in the training datasets.\n", + "\n", + "When the model is used for ranking, it is often evaluated on all relevant results with a more uniform distribution. As a result, it may act unexpectedly at evaluation time for cases that are under-represented in the training dataset. When using more features, the training dataset can have large gaps in \"bad\" parts of the feature space.\n", + "\n", + "A flexible and complicated model might fail in this case, since it can overfit the over-represented data points and therefore generalize poorly. We handle this issue by applying domain knowledge to add *shape constraints* that guide the trained model to make reasonable predictions *when it cannot pick them up from the training dataset*.\n", + "\n", + "In this example, for the testing dataset we intentionally ignore the over-representation to simulate the online setting previously discussed.\n",
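+ "\n", + "To make the over-representation concrete, here is a quick sanity check (an illustrative sketch, not part of the original pipeline): under the Poisson view model used for the training split in the next cell, popular high-CTR restaurants receive far more views than unpopular ones." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "SamplingBiasSketch0" + }, + "outputs": [], + "source": [ + "# Illustrative sketch: expected views under the training sampling scheme\n", + "# scale with ctr * num_reviews, so popular, high-CTR restaurants dominate\n", + "# the training data.\n", + "expected_views = ctr_labels * num_reviews / 50.0\n", + "top = ctr_labels \u003e np.quantile(ctr_labels, 0.75)\n", + "bottom = ctr_labels \u003c np.quantile(ctr_labels, 0.25)\n", + "print(\"Mean expected views (top CTR quartile): {:.2f}\".format(\n", + " expected_views[top].mean()))\n", + "print(\"Mean expected views (bottom CTR quartile): {:.2f}\".format(\n", + " expected_views[bottom].mean()))"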
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "jS6WOtXQ8jwX" + }, + "outputs": [], + "source": [ + "def sample_dataset(n, testing_set):\n", + " (avg_ratings, num_reviews, dollar_ratings, ctr_labels) = sample_restaurants(n)\n", + " if testing_set:\n", + " # Testing has a more uniform distribution over all restaurants.\n", + " num_views = np.random.poisson(lam=3, size=n)\n", + " else:\n", + " # Training/validation datasets have more views on popular restaurants.\n", + " num_views = np.random.poisson(lam=ctr_labels * num_reviews / 50.0, size=n)\n", + "\n", + " return pd.DataFrame({\n", + " \"avg_rating\": np.repeat(avg_ratings, num_views),\n", + " \"num_reviews\": np.repeat(num_reviews, num_views),\n", + " \"dollar_rating\": np.repeat(dollar_ratings, num_views),\n", + " \"clicked\": np.random.binomial(n=1, p=np.repeat(ctr_labels, num_views))\n", + " })\n", + "\n", + "\n", + "# Generate datasets.\n", + "np.random.seed(42)\n", + "data_train = sample_dataset(2000, testing_set=False)\n", + "data_val = sample_dataset(1000, testing_set=False)\n", + "data_test = sample_dataset(1000, testing_set=True)\n", + "\n", + "# Plotting dataset densities.\n", + "figsize(12, 5)\n", + "fig, axs = plt.subplots(1, 2, sharey=False, tight_layout=False)\n", + "for ax, data, title in [(axs[0], data_train, \"training\"),\n", + " (axs[1], data_test, \"testing\")]:\n", + " _, _, _, density = ax.hist2d(\n", + " x=data[\"avg_rating\"],\n", + " y=data[\"num_reviews\"],\n", + " bins=(np.linspace(1, 5, num=21), np.linspace(0, 200, num=21)),\n", + " normed=True,\n", + " cmap=\"Blues\",\n", + " )\n", + " ax.set(xlim=(1, 5))\n", + " ax.set(ylim=(0, 200))\n", + " ax.set(xlabel=\"Average Rating\")\n", + " ax.set(ylabel=\"Number of Reviews\")\n", + " ax.title.set_text(\"Density of {} examples\".format(title))\n", + " _ = fig.colorbar(density, ax=ax)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "4fVyLgpCT1nW" + }, + "source": [ + "Defining input_fns used for training and evaluation:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "DYzRTRR2GKoS" + }, + "outputs": [], + "source": [ + "train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(\n", + " x=data_train,\n", + " y=data_train[\"clicked\"],\n", + " batch_size=BATCH_SIZE,\n", + " num_epochs=NUM_EPOCHS,\n", + " shuffle=False,\n", + ")\n", + "\n", + "# feature_analysis_input_fn is used for TF Lattice estimators.\n", + "feature_analysis_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(\n", + " x=data_train,\n", + " y=data_train[\"clicked\"],\n", + " batch_size=BATCH_SIZE,\n", + " num_epochs=1,\n", + " shuffle=False,\n", + ")\n", + "\n", + "val_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(\n", + " x=data_val,\n", + " y=data_val[\"clicked\"],\n", + " batch_size=BATCH_SIZE,\n", + " num_epochs=1,\n", + " shuffle=False,\n", + ")\n", + "\n", + "test_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(\n", + " x=data_test,\n", + " y=data_test[\"clicked\"],\n", + " batch_size=BATCH_SIZE,\n", + " num_epochs=1,\n", + " shuffle=False,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "qoTrw3FZqvPK" + }, + "source": [ + "## Fitting Gradient Boosted Trees" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "ZklNowexE3wB" + }, + "source": [ + "Let's start off with only two features: 
`avg_rating` and `num_reviews`.\n", + "\n", + "We create a few auxiliary functions for plotting and calculating validation and test metrics." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "SX6rARJWURWl" + }, + "outputs": [], + "source": [ + "def analyze_two_d_estimator(estimator, name):\n", + " # Extract validation metrics.\n", + " metric = estimator.evaluate(input_fn=val_input_fn)\n", + " print(\"Validation AUC: {}\".format(metric[\"auc\"]))\n", + " metric = estimator.evaluate(input_fn=test_input_fn)\n", + " print(\"Testing AUC: {}\".format(metric[\"auc\"]))\n", + "\n", + " def two_d_pred(avg_ratings, num_reviews):\n", + " results = estimator.predict(\n", + " tf.compat.v1.estimator.inputs.pandas_input_fn(\n", + " x=pd.DataFrame({\n", + " \"avg_rating\": avg_ratings,\n", + " \"num_reviews\": num_reviews,\n", + " }),\n", + " shuffle=False,\n", + " ))\n", + " return [x[\"logistic\"][0] for x in results]\n", + "\n", + " def two_d_click_through_rate(avg_ratings, num_reviews):\n", + " return np.mean([\n", + " click_through_rate(avg_ratings, num_reviews,\n", + " np.repeat(d, len(avg_ratings)))\n", + " for d in [\"D\", \"DD\", \"DDD\", \"DDDD\"]\n", + " ],\n", + " axis=0)\n", + "\n", + " figsize(11, 5)\n", + " plot_fns([(\"{} Estimated CTR\".format(name), two_d_pred),\n", + " (\"CTR\", two_d_click_through_rate)],\n", + " split_by_dollar=False)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "JVef4f8yUUbs" + }, + "source": [ + "We can fit TensorFlow gradient boosted decision trees on the dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "DnPYlRAo2mnQ" + }, + "outputs": [], + "source": [ + "feature_columns = [\n", + " tf.feature_column.numeric_column(\"num_reviews\"),\n", + " tf.feature_column.numeric_column(\"avg_rating\"),\n", + "]\n", + "gbt_estimator = tf.estimator.BoostedTreesClassifier(\n", + " feature_columns=feature_columns,\n", + " # Hyper-params optimized on validation set.\n", + " n_batches_per_layer=100,\n", + " max_depth=2,\n", + " n_trees=100,\n", + " config=tf.estimator.RunConfig(tf_random_seed=42),\n", + ")\n", + "gbt_estimator.train(input_fn=train_input_fn)\n", + "analyze_two_d_estimator(gbt_estimator, \"GBT\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "nYZtd6YvsNdn" + }, + "source": [ + "Even though the model has captured the general shape of the true CTR, it produces a counter-intuitive contour plot of the estimated (predicted) CTR: at certain spots on the contour surface the estimated CTR increases if we move either downwards or leftwards, meaning that one would be more likely to click on the restaurants at those spots if\n", + "- their review numbers decreased, i.e. they were less trendy, or\n", + "- their average ratings dropped, i.e. they served worse food.\n", + "\n", + "Several factors could be responsible:\n", + "- We are missing `dollar_rating`, an important feature.\n", + "- The training dataset has \"holes\": areas where there are not enough sample points.\n", + "- The noise-to-signal ratio is high.\n", + "- The applied ML model (boosted trees) is too flexible and easy to overfit.\n", + "\n", + "The remedy could be simple: we enforce the shape constraint that the estimated CTR values must be monotonically increasing with respect to both the average rating and the number of reviews. 
We will later see how to implement this in TFL.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Uf7WqGooFiEp" + }, + "source": [ + "## Fitting a DNN" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "_s2aT3x0E_tF" + }, + "source": [ + "We can repeat the same steps with a DNN classifier. Similar patterns can also be observed (rerun the cell if you do not see them): there are not enough sample points in the area where `num_reviews` is small, and the DNN extrapolates poorly in this area. " + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "gFUeG6kLDNhO" + }, + "outputs": [], + "source": [ + "feature_columns = [\n", + " tf.feature_column.numeric_column(\"num_reviews\"),\n", + " tf.feature_column.numeric_column(\"avg_rating\"),\n", + "]\n", + "dnn_estimator = tf.estimator.DNNClassifier(\n", + " feature_columns=feature_columns,\n", + " # Hyper-params optimized on validation set.\n", + " hidden_units=[16, 8, 8],\n", + " optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),\n", + " config=tf.estimator.RunConfig(tf_random_seed=42),\n", + ")\n", + "dnn_estimator.train(input_fn=train_input_fn)\n", + "analyze_two_d_estimator(dnn_estimator, \"DNN\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "0Avkw-okw7JL" + }, + "source": [ + "## Shape Constraints" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "3ExyethCFBrP" + }, + "source": [ + "TensorFlow Lattice (TFL) focuses on enforcing shape constraints that safeguard model behavior. These shape constraints are realized by two key TFL concepts: \n", + "- *calibrator*: a piece-wise linear function, and\n", + "- *lattice*: a multi-dimensional lookup table. \n", + "\n", + "Their details can be found in [our JMLR paper](http://jmlr.org/papers/volume17/15-243/15-243.pdf). \n", + "\n", + "The most straightforward way to use TFL is through the premade TFL canned estimators. In this colab we will configure a TFL canned classifier. Similar to a TensorFlow estimator, training a TFL canned estimator requires several components:\n", + "- feature columns: definition of model features.\n", + "- feature configs: definition of TFL-specific feature specs and shape constraints.\n", + "- model config: configuration of TFL canned estimator specs.\n", + "- feature analysis input fn: a TF input fn passing data for TFL initialization.\n", + "- (train) input fn: a TF input fn passing data for model training." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "anyCM4sCpOSo" + }, + "source": [ + "### Monotonicity\n", + "We first address our monotonicity concerns by adding monotonicity shape constraints to both features involved. \n", + "\n", + "To instruct TFL to enforce shape constraints, we decide which features are involved and specify the constraints for each feature in its feature config. 
The following code shows how we can require the output to be monotonically increasing with respect to both `num_reviews` and `avg_rating`.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "FCm1lOjmwur_" + }, + "outputs": [], + "source": [ + "feature_columns = [\n", + " tf.feature_column.numeric_column(\"num_reviews\"),\n", + " tf.feature_column.numeric_column(\"avg_rating\"),\n", + "]\n", + "model_config = tfl.configs.CalibratedLatticeConfig(\n", + " feature_configs=[\n", + " tfl.configs.FeatureConfig(\n", + " name=\"num_reviews\",\n", + " lattice_size=2,\n", + " monotonicity=\"increasing\",\n", + " pwl_calibration_num_keypoints=20,\n", + " ),\n", + " tfl.configs.FeatureConfig(\n", + " name=\"avg_rating\",\n", + " lattice_size=2,\n", + " monotonicity=\"increasing\",\n", + " pwl_calibration_num_keypoints=20,\n", + " )\n", + " ])\n", + "tfl_estimator = tfl.estimators.CannedClassifier(\n", + " feature_columns=feature_columns,\n", + " model_config=model_config,\n", + " feature_analysis_input_fn=feature_analysis_input_fn,\n", + " optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),\n", + " config=tf.estimator.RunConfig(tf_random_seed=42),\n", + ")\n", + "tfl_estimator.train(input_fn=train_input_fn)\n", + "analyze_two_d_estimator(tfl_estimator, \"TF Lattice\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "ubNRBCWW5wQ9" + }, + "source": [ + "The canned classifier we built behaves in the following manner:\n", + "- It first applies a *calibrator* (a piece-wise linear function) to each feature to map the feature values onto [0,1].\n", + "- It then combines the calibrated feature values using a *lattice* and outputs the prediction.\n", + "\n", + "We can use `tfl.visualization` to visualize model behavior. In particular, the following plot shows the two trained calibrators included in the canned classifier. \n" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "both", + "colab": {}, + "colab_type": "code", + "id": "C0py9Q6OBRBE" + }, + "outputs": [], + "source": [ + "def save_and_visualize_lattice(tfl_estimator):\n", + " saved_model_path = tfl_estimator.export_saved_model(\n", + " \"/tmp/TensorFlow_Lattice_101/\",\n", + " tf.estimator.export.build_parsing_serving_input_receiver_fn(\n", + " feature_spec=tf.feature_column.make_parse_example_spec(\n", + " feature_columns)))\n", + " model_graph = tfl.estimators.get_model_graph(saved_model_path)\n", + " figsize(8, 8)\n", + " tfl.visualization.draw_model_graph(model_graph)\n", + " return model_graph\n", + "\n", + "_ = save_and_visualize_lattice(tfl_estimator)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "7vZ5fShXs504" + }, + "source": [ + "Now the contour plot is cleaner and shows what we expect: the estimated CTR goes up when the average rating or the number of reviews increases. Notice that the calibrators are monotonic." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "RfniRZCHIvfK" + }, + "source": [ + "### Diminishing Returns\n", + "[Diminishing returns](https://en.wikipedia.org/wiki/Diminishing_returns) means that the marginal gain of increasing a feature value decreases as we increase the value. In our case we expect that the `num_reviews` feature follows this pattern, so we can configure its calibrator accordingly. 
Notice that we can decompose diminishing returns into two sufficient conditions:\n", + "\n", + "- the calibrator is monotonically increasing, and\n", + "- the calibrator is concave.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "XQrM9BskY-wx" + }, + "outputs": [], + "source": [ + "feature_columns = [\n", + " tf.feature_column.numeric_column(\"num_reviews\"),\n", + " tf.feature_column.numeric_column(\"avg_rating\"),\n", + "]\n", + "model_config = tfl.configs.CalibratedLatticeConfig(\n", + " feature_configs=[\n", + " tfl.configs.FeatureConfig(\n", + " name=\"num_reviews\",\n", + " lattice_size=2,\n", + " monotonicity=\"increasing\",\n", + " pwl_calibration_convexity=\"concave\",\n", + " pwl_calibration_num_keypoints=20,\n", + " ),\n", + " tfl.configs.FeatureConfig(\n", + " name=\"avg_rating\",\n", + " lattice_size=2,\n", + " monotonicity=\"increasing\",\n", + " pwl_calibration_num_keypoints=20,\n", + " )\n", + " ])\n", + "tfl_estimator = tfl.estimators.CannedClassifier(\n", + " feature_columns=feature_columns,\n", + " model_config=model_config,\n", + " feature_analysis_input_fn=feature_analysis_input_fn,\n", + " optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),\n", + " config=tf.estimator.RunConfig(tf_random_seed=42),\n", + ")\n", + "tfl_estimator.train(input_fn=train_input_fn)\n", + "analyze_two_d_estimator(tfl_estimator, \"TF Lattice\")\n", + "_ = save_and_visualize_lattice(tfl_estimator)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "J6CP2Ovapiu3" + }, + "source": [ + "### 2D Shape Constraint: Trust\n", + "A 5-star rating for a restaurant with only one or two reviews is likely an unreliable rating (the restaurant might not actually be good), whereas a 4-star rating for a restaurant with hundreds of reviews is much more reliable (the restaurant is likely good in this case). We can see that the number of reviews of a restaurant affects how much trust we place in its average rating. \n", + "\n", + "We can apply TFL trust constraints to inform the model that a larger (or smaller) value of one feature should increase the model's reliance on another feature. 
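Concretely, the `edgeworth` trust constraint used below makes the effect (slope) of the calibrated `avg_rating` on the lattice output grow as `num_reviews` increases.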
" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "OA14j0erm6TJ" + }, + "outputs": [], + "source": [ + "feature_columns = [\n", + " tf.feature_column.numeric_column(\"num_reviews\"),\n", + " tf.feature_column.numeric_column(\"avg_rating\"),\n", + "]\n", + "model_config = tfl.configs.CalibratedLatticeConfig(\n", + " feature_configs=[\n", + " tfl.configs.FeatureConfig(\n", + " name=\"num_reviews\",\n", + " lattice_size=2,\n", + " monotonicity=\"increasing\",\n", + " pwl_calibration_convexity=\"concave\",\n", + " pwl_calibration_num_keypoints=20,\n", + " # Larger num_reviews indicating more trust in avg_rating.\n", + " reflects_trust_in=[\n", + " tfl.configs.TrustConfig(\n", + " feature_name=\"avg_rating\", trust_type=\"edgeworth\"),\n", + " ],\n", + " ),\n", + " tfl.configs.FeatureConfig(\n", + " name=\"avg_rating\",\n", + " lattice_size=2,\n", + " monotonicity=\"increasing\",\n", + " pwl_calibration_num_keypoints=20,\n", + " )\n", + " ])\n", + "tfl_estimator = tfl.estimators.CannedClassifier(\n", + " feature_columns=feature_columns,\n", + " model_config=model_config,\n", + " feature_analysis_input_fn=feature_analysis_input_fn,\n", + " optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),\n", + " config=tf.estimator.RunConfig(tf_random_seed=42),\n", + ")\n", + "tfl_estimator.train(input_fn=train_input_fn)\n", + "analyze_two_d_estimator(tfl_estimator, \"TF Lattice\")\n", + "model_graph = save_and_visualize_lattice(tfl_estimator)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "puvP9X8XxyRV" + }, + "source": [ + "The following plot presents the trained lattice lookup result. Due to the trust constraint, we would expect that larger values of calibrated `num_reviews` would enable wider ranges for calibrated `avg_rating` to more significantly move the lattice output. This is confirmed by the plot." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "both", + "colab": {}, + "colab_type": "code", + "id": "RounEQebxxnA" + }, + "outputs": [], + "source": [ + "lat_mesh_n = 12\n", + "lat_mesh_x, lat_mesh_y = tfl.test_utils.two_dim_mesh_grid(\n", + " lat_mesh_n**2, 0, 0, 1, 1)\n", + "lat_mesh_fn = tfl.test_utils.get_hypercube_interpolation_fn(\n", + " model_graph.output_node.weights.flatten())\n", + "lat_mesh_z = [\n", + " lat_mesh_fn([lat_mesh_x.flatten()[i],\n", + " lat_mesh_y.flatten()[i]]) for i in range(lat_mesh_n**2)\n", + "]\n", + "trust_plt = tfl.visualization.plot_outputs(\n", + " (lat_mesh_x, lat_mesh_y),\n", + " {\"Lattice Lookup\": lat_mesh_z},\n", + " figsize=(6, 6),\n", + ")\n", + "trust_plt.title(\"Trust\")\n", + "trust_plt.xlabel(\"Calibrated avg_rating\")\n", + "trust_plt.ylabel(\"Calibrated num_reviews\")\n", + "trust_plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "SKe3UHX6pUjw" + }, + "source": [ + "### Smoothing Calibrators\n", + "Let's now take a look at the calibrator of `avg_rating`. Though it is monotonically increasing, the changes in its slopes are somewhat random and hard to interpret. That suggests we might want to consider smoothing this calibrator.\n", + "\n", + "Here we apply a `wrinkle` regularizer to reduce changes in the curvature. There are also the `laplacian` regularizer to flatten the calibrator and the `hessian` regularizer to make it more linear. 
\n" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "qxFHH3hSpWfq" + }, + "outputs": [], + "source": [ + "feature_columns = [\n", + " tf.feature_column.numeric_column(\"num_reviews\"),\n", + " tf.feature_column.numeric_column(\"avg_rating\"),\n", + "]\n", + "model_config = tfl.configs.CalibratedLatticeConfig(\n", + " feature_configs=[\n", + " tfl.configs.FeatureConfig(\n", + " name=\"num_reviews\",\n", + " lattice_size=2,\n", + " monotonicity=\"increasing\",\n", + " pwl_calibration_convexity=\"concave\",\n", + " pwl_calibration_num_keypoints=20,\n", + " regularizer_configs=[\n", + " tfl.configs.RegularizerConfig(name=\"calib_wrinkle\", l2=1.0),\n", + " ],\n", + " reflects_trust_in=[\n", + " tfl.configs.TrustConfig(\n", + " feature_name=\"avg_rating\", trust_type=\"edgeworth\"),\n", + " ],\n", + " ),\n", + " tfl.configs.FeatureConfig(\n", + " name=\"avg_rating\",\n", + " lattice_size=2,\n", + " monotonicity=\"increasing\",\n", + " pwl_calibration_num_keypoints=20,\n", + " regularizer_configs=[\n", + " tfl.configs.RegularizerConfig(name=\"calib_wrinkle\", l2=1.0),\n", + " ],\n", + " )\n", + " ])\n", + "tfl_estimator = tfl.estimators.CannedClassifier(\n", + " feature_columns=feature_columns,\n", + " model_config=model_config,\n", + " feature_analysis_input_fn=feature_analysis_input_fn,\n", + " optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),\n", + " config=tf.estimator.RunConfig(tf_random_seed=42),\n", + ")\n", + "tfl_estimator.train(input_fn=train_input_fn)\n", + "analyze_two_d_estimator(tfl_estimator, \"TF Lattice\")\n", + "_ = save_and_visualize_lattice(tfl_estimator)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "pSUd6aFlpYz4" + }, + "source": [ + "### Partial Monotonicity for Categorical Calibration\n", + "So far we have been using only two of the numeric features in the model. Here we will add a third feature using a categorical calibration layer. Again we start by setting up helper functions for plotting and metric calculation." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "5tLDKwTmjrLw" + }, + "outputs": [], + "source": [ + "def analyze_three_d_estimator(estimator, name):\n", + " # Extract validation metrics.\n", + " metric = estimator.evaluate(input_fn=val_input_fn)\n", + " print(\"Validation AUC: {}\".format(metric[\"auc\"]))\n", + " metric = estimator.evaluate(input_fn=test_input_fn)\n", + " print(\"Testing AUC: {}\".format(metric[\"auc\"]))\n", + "\n", + " def three_d_pred(avg_ratings, num_reviews, dollar_rating):\n", + " results = estimator.predict(\n", + " tf.compat.v1.estimator.inputs.pandas_input_fn(\n", + " x=pd.DataFrame({\n", + " \"avg_rating\": avg_ratings,\n", + " \"num_reviews\": num_reviews,\n", + " \"dollar_rating\": dollar_rating,\n", + " }),\n", + " shuffle=False,\n", + " ))\n", + " return [x[\"logistic\"][0] for x in results]\n", + "\n", + " figsize(11, 22)\n", + " plot_fns([(\"{} Estimated CTR\".format(name), three_d_pred),\n", + " (\"CTR\", click_through_rate)],\n", + " split_by_dollar=True)\n", + " " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "CnPiqf4rq6kJ" + }, + "source": [ + "To involve the third feature `dollar_rating`, we should recall that categorical features require a slightly different treatment in TFL: both as a feature column and as a feature config. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "m-w7iGEEpgGt" + }, + "outputs": [], + "source": [ + "feature_columns = [\n", + " tf.feature_column.numeric_column(\"num_reviews\"),\n", + " tf.feature_column.numeric_column(\"avg_rating\"),\n", + " tf.feature_column.categorical_column_with_vocabulary_list(\n", + " \"dollar_rating\",\n", + " vocabulary_list=[\"D\", \"DD\", \"DDD\", \"DDDD\"],\n", + " dtype=tf.string,\n", + " default_value=0),\n", + "]\n", + "model_config = tfl.configs.CalibratedLatticeConfig(\n", + " feature_configs=[\n", + " tfl.configs.FeatureConfig(\n", + " name=\"num_reviews\",\n", + " lattice_size=2,\n", + " monotonicity=\"increasing\",\n", + " pwl_calibration_convexity=\"concave\",\n", + " pwl_calibration_num_keypoints=20,\n", + " regularizer_configs=[\n", + " tfl.configs.RegularizerConfig(name=\"calib_wrinkle\", l2=1.0),\n", + " ],\n", + " reflects_trust_in=[\n", + " tfl.configs.TrustConfig(\n", + " feature_name=\"avg_rating\", trust_type=\"edgeworth\"),\n", + " ],\n", + " ),\n", + " tfl.configs.FeatureConfig(\n", + " name=\"avg_rating\",\n", + " lattice_size=2,\n", + " monotonicity=\"increasing\",\n", + " pwl_calibration_num_keypoints=20,\n", + " regularizer_configs=[\n", + " tfl.configs.RegularizerConfig(name=\"calib_wrinkle\", l2=1.0),\n", + " ],\n", + " ),\n", + " tfl.configs.FeatureConfig(\n", + " name=\"dollar_rating\",\n", + " lattice_size=2,\n", + " pwl_calibration_num_keypoints=4,\n", + " # Here we only specify one monotonicity:\n", + " # `D` resturants has smaller value than `DD` restaurants\n", + " monotonicity=[(\"D\", \"DD\")],\n", + " ),\n", + " ])\n", + "tfl_estimator = tfl.estimators.CannedClassifier(\n", + " feature_columns=feature_columns,\n", + " model_config=model_config,\n", + " feature_analysis_input_fn=feature_analysis_input_fn,\n", + " optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),\n", + " config=tf.estimator.RunConfig(tf_random_seed=42),\n", + ")\n", + "tfl_estimator.train(input_fn=train_input_fn)\n", + "analyze_three_d_estimator(tfl_estimator, \"TF Lattice\")\n", + "_ = save_and_visualize_lattice(tfl_estimator)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "gdIzhYL79_Pp" + }, + "source": [ + "This categorical calibrator shows the preference of the model output: DD \u003e D \u003e DDD \u003e DDDD, which is consistent with our setup. Notice there is also a column for missing values. Though there is no missing feature in our training and testing data, the model provides us with the best way to treat the missing value should it happen during downstream model serving.\n", + "\n", + "Here we also plot the predicted CTR of this model conditioned on `dollar_rating`. Notice that all the constraints we required are fulfilled in each of the slices." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "rh0H2b6l_rwZ" + }, + "source": [ + "### Output Calibration" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "KPb2ri4e7HXF" + }, + "source": [ + "For all the TFL models we have trained so far, the lattice layer (indicated as \"Lattice\" in the model graph) directly outputs the model prediction. 
Sometimes we are not sure whether the lattice output should be rescaled to emit model outputs:\n", + "- the features are $log$ counts while the labels are counts.\n", + "- the lattice is configured to have very few vertices but the label distribution is relatively complicated.\n", + "\n", + "In those cases we can add another calibrator between the lattice output and the model output to increase model flexibility. Here let's add a calibrator layer with 5 keypoints to the model we just built. We also add a regularizer for the output calibrator to keep the function smooth.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "k5Sg_gUj_0i4" + }, + "outputs": [], + "source": [ + "feature_columns = [\n", + " tf.feature_column.numeric_column(\"num_reviews\"),\n", + " tf.feature_column.numeric_column(\"avg_rating\"),\n", + " tf.feature_column.categorical_column_with_vocabulary_list(\n", + " \"dollar_rating\",\n", + " vocabulary_list=[\"D\", \"DD\", \"DDD\", \"DDDD\"],\n", + " dtype=tf.string,\n", + " default_value=0),\n", + "]\n", + "model_config = tfl.configs.CalibratedLatticeConfig(\n", + " output_calibration=True,\n", + " output_calibration_num_keypoint=5,\n", + " regularizer_configs=[\n", + " tfl.configs.RegularizerConfig(name=\"output_calib_wrinkle\", l2=0.1),\n", + " ],\n", + " feature_configs=[\n", + " tfl.configs.FeatureConfig(\n", + " name=\"num_reviews\",\n", + " lattice_size=2,\n", + " monotonicity=\"increasing\",\n", + " pwl_calibration_convexity=\"concave\",\n", + " pwl_calibration_num_keypoints=20,\n", + " regularizer_configs=[\n", + " tfl.configs.RegularizerConfig(name=\"calib_wrinkle\", l2=1.0),\n", + " ],\n", + " reflects_trust_in=[\n", + " tfl.configs.TrustConfig(\n", + " feature_name=\"avg_rating\", trust_type=\"edgeworth\"),\n", + " ],\n", + " ),\n", + " tfl.configs.FeatureConfig(\n", + " name=\"avg_rating\",\n", + " lattice_size=2,\n", + " monotonicity=\"increasing\",\n", + " pwl_calibration_num_keypoints=20,\n", + " regularizer_configs=[\n", + " tfl.configs.RegularizerConfig(name=\"calib_wrinkle\", l2=1.0),\n", + " ],\n", + " ),\n", + " tfl.configs.FeatureConfig(\n", + " name=\"dollar_rating\",\n", + " lattice_size=2,\n", + " pwl_calibration_num_keypoints=4,\n", + " # Here we only specify one monotonicity:\n", + " # `D` resturants has smaller value than `DD` restaurants\n", + " monotonicity=[(\"D\", \"DD\")],\n", + " ),\n", + "])\n", + "tfl_estimator = tfl.estimators.CannedClassifier(\n", + " feature_columns=feature_columns,\n", + " model_config=model_config,\n", + " feature_analysis_input_fn=feature_analysis_input_fn,\n", + " optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),\n", + " config=tf.estimator.RunConfig(tf_random_seed=42),\n", + ")\n", + "tfl_estimator.train(input_fn=train_input_fn)\n", + "analyze_three_d_estimator(tfl_estimator, \"TF Lattice\")\n", + "_ = save_and_visualize_lattice(tfl_estimator)" + ] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "name": "tfl_shape_constraints.ipynb", + "private_outputs": true, + "provenance": [ + { + "file_id": "1NYk-Kehpe0V3JgdRAYZmR9-kUdKcSxys", + "timestamp": 1579632224365 + } + ], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/examples/BUILD b/examples/BUILD new file mode 100644 index 0000000..ec47d8d --- /dev/null +++ b/examples/BUILD @@ -0,0 +1,62 @@ +# Copyright 2019 The TensorFlow Lattice Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +licenses(["notice"]) + +package( + default_visibility = [ + "//tensorflow_lattice:__subpackages__", + ], +) + +py_binary( + name = "canned_estimators_uci_heart", + srcs = ["canned_estimators_uci_heart.py"], + python_version = "PY3", + deps = [ + # tensorflow dep, + "//tensorflow_lattice", + ], +) + +py_binary( + name = "keras_sequential_uci_heart", + srcs = ["keras_sequential_uci_heart.py"], + python_version = "PY3", + deps = [ + # tensorflow dep, + "//tensorflow_lattice", + ], +) + +py_binary( + name = "keras_functional_uci_heart", + srcs = ["keras_functional_uci_heart.py"], + python_version = "PY3", + deps = [ + # tensorflow dep, + "//tensorflow_lattice", + ], +) + +py_binary( + name = "custom_estimators_uci_heart", + srcs = ["custom_estimators_uci_heart.py"], + python_version = "PY3", + deps = [ + # tensorflow dep, + "//tensorflow_lattice", + ], +) diff --git a/examples/canned_estimators_uci_heart.py b/examples/canned_estimators_uci_heart.py new file mode 100644 index 0000000..51597c3 --- /dev/null +++ b/examples/canned_estimators_uci_heart.py @@ -0,0 +1,325 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3 +"""Example usage of TFL canned estimators. + +This example trains several TFL canned estimators on the UCI heart dataset. + +Example usage: +canned_estimators_uci_heart --config_updates=feature__age__lattice_size=4 +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import re +from absl import app +from absl import flags +import pandas as pd +import tensorflow as tf +from tensorflow import feature_column as fc +from tensorflow_lattice import configs +from tensorflow_lattice import estimators + +FLAGS = flags.FLAGS +flags.DEFINE_float('learning_rate', 0.1, 'Learning rate.') +flags.DEFINE_integer('batch_size', 100, 'Batch size.') +flags.DEFINE_integer('num_epochs', 50, 'Number of training epochs.') +flags.DEFINE_integer('prefitting_num_epochs', 10, 'Prefitting epochs.') +flags.DEFINE_list( + 'config_updates', '', + 'Comma-separated list of updates to model configs in name=value format. ' + 'See tfl.configs.apply_updates().') + + +def main(_): + # Parse config updates from command-line flags.
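+ # For example, --config_updates=feature__age__lattice_size=4 sets the + # lattice size of the 'age' feature to 4 (see the docstring above).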
+ config_updates = [] + for update in FLAGS.config_updates: + config_updates.extend(re.findall(r'(\S*)\s*=\s*(\S*)', update)) + + # UCI Statlog (Heart) dataset. + csv_file = tf.keras.utils.get_file( + 'heart.csv', 'http://storage.googleapis.com/applied-dl/heart.csv') + df = pd.read_csv(csv_file) + target = df.pop('target') + train_size = int(len(df) * 0.8) + train_x = df[:train_size] + train_y = target[:train_size] + test_x = df[train_size:] + test_y = target[train_size:] + + # feature_analysis_input_fn is used to collect statistics about the input + # features, and thus requires only one pass over the dataset. + # + # feature_analysis_input_fn is required if you have at least one FeatureConfig + # with "pwl_calibration_input_keypoints='quantiles'". Note that 'quantiles' is + # the default keypoints configuration, so you will most likely need it. + feature_analysis_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn( + x=train_x, + y=train_y, + shuffle=False, + batch_size=FLAGS.batch_size, + num_epochs=1, + num_threads=1) + + # prefitting_input_fn is used to prefit an initial ensemble that is used to + # estimate feature interactions. This prefitting step does not need to fully + # converge, and thus requires fewer epochs than the main training. + # + # prefitting_input_fn is only required if your model_config is + # CalibratedLatticeEnsembleConfig with "lattices='crystals'". + prefitting_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn( + x=train_x, + y=train_y, + shuffle=True, + batch_size=FLAGS.batch_size, + num_epochs=FLAGS.prefitting_num_epochs, + num_threads=1) + + train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn( + x=train_x, + y=train_y, + shuffle=True, + batch_size=FLAGS.batch_size, + num_epochs=FLAGS.num_epochs, + num_threads=1) + + test_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn( + x=test_x, + y=test_y, + shuffle=False, + batch_size=FLAGS.batch_size, + num_epochs=FLAGS.num_epochs, + num_threads=1) + + # Feature columns. + # - age + # - sex + # - cp chest pain type (4 values) + # - trestbps resting blood pressure + # - chol serum cholesterol in mg/dl + # - fbs fasting blood sugar > 120 mg/dl + # - restecg resting electrocardiographic results (values 0,1,2) + # - thalach maximum heart rate achieved + # - exang exercise induced angina + # - oldpeak ST depression induced by exercise relative to rest + # - slope the slope of the peak exercise ST segment + # - ca number of major vessels (0-3) colored by fluoroscopy + # - thal 3 = normal; 6 = fixed defect; 7 = reversible defect + feature_columns = [ + fc.numeric_column('age', default_value=-1), + fc.categorical_column_with_vocabulary_list('sex', [0, 1]), + fc.numeric_column('cp'), + fc.numeric_column('trestbps', default_value=-1), + fc.numeric_column('chol'), + fc.categorical_column_with_vocabulary_list('fbs', [0, 1]), + fc.categorical_column_with_vocabulary_list('restecg', [0, 1, 2]), + fc.numeric_column('thalach'), + fc.categorical_column_with_vocabulary_list('exang', [0, 1]), + fc.numeric_column('oldpeak'), + fc.categorical_column_with_vocabulary_list('slope', [0, 1, 2]), + fc.numeric_column('ca'), + fc.categorical_column_with_vocabulary_list( + 'thal', ['normal', 'fixed', 'reversible']), + ] + + # Feature configs are used to specify how each feature is calibrated and used. + feature_configs = [ + configs.FeatureConfig( + name='age', + lattice_size=3, + # By default, input keypoints of pwl are quantiles of the feature.
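+ # The quantiles are estimated from the data provided by + # feature_analysis_input_fn.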
+ pwl_calibration_num_keypoints=5, + monotonicity='increasing', + pwl_calibration_clip_max=100, + ), + configs.FeatureConfig( + name='cp', + pwl_calibration_num_keypoints=4, + # Keypoints can be uniformly spaced. + pwl_calibration_input_keypoints='uniform', + monotonicity='increasing', + ), + configs.FeatureConfig( + name='chol', + # Explicit input keypoint initialization. + pwl_calibration_input_keypoints=[126.0, 210.0, 247.0, 286.0, 564.0], + monotonicity='increasing', + pwl_calibration_clip_min=130, + # Calibration can be forced to span the full output range by clamping. + pwl_calibration_clamp_min=True, + pwl_calibration_clamp_max=True, + # Per-feature regularization. + regularizer_configs=[ + configs.RegularizerConfig(name='calib_hessian', l2=1e-4), + ], + ), + configs.FeatureConfig( + name='fbs', + # Monotonicity: output for 1 should be larger than output for 0. + monotonicity=[(0, 1)], + ), + configs.FeatureConfig( + name='trestbps', + pwl_calibration_num_keypoints=5, + monotonicity='decreasing', + ), + configs.FeatureConfig( + name='thalach', + pwl_calibration_num_keypoints=5, + monotonicity='decreasing', + ), + configs.FeatureConfig( + name='restecg', + # Categorical monotonicity can be a partial order. + monotonicity=[(0, 1), (0, 2)], + ), + configs.FeatureConfig( + name='exang', + monotonicity=[(0, 1)], + ), + configs.FeatureConfig( + name='oldpeak', + pwl_calibration_num_keypoints=5, + monotonicity='increasing', + ), + configs.FeatureConfig( + name='slope', + monotonicity=[(0, 1), (1, 2)], + ), + configs.FeatureConfig( + name='ca', + pwl_calibration_num_keypoints=4, + monotonicity='increasing', + ), + configs.FeatureConfig( + name='thal', + monotonicity=[('normal', 'fixed'), ('normal', 'reversible')], + ), + ] + + # Serving input fn is used to create saved models. + serving_input_fn = ( + tf.estimator.export.build_parsing_serving_input_receiver_fn( + feature_spec=fc.make_parse_example_spec(feature_columns))) + + # Model config defines the model structure for the estimator. + # This is a calibrated linear model with output calibration: inputs are + # calibrated, linearly combined, and the output of the linear layer is + # calibrated again using a PWL function. + model_config = configs.CalibratedLinearConfig( + feature_configs=feature_configs, + use_bias=True, + output_calibration=True, + regularizer_configs=[ + # Regularizer for the output calibrator. + configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4), + ]) + # Update model configuration. + # See tfl.configs.apply_updates for details. + configs.apply_updates(model_config, config_updates) + estimator = estimators.CannedClassifier( + feature_columns=feature_columns, + model_config=model_config, + feature_analysis_input_fn=feature_analysis_input_fn, + optimizer=tf.keras.optimizers.Adam(FLAGS.learning_rate)) + estimator.train(input_fn=train_input_fn) + results = estimator.evaluate(input_fn=test_input_fn) + print('Calibrated linear results: {}'.format(results)) + print('Calibrated linear model exported to {}'.format( + estimator.export_saved_model(estimator.model_dir, serving_input_fn))) + + # This is a calibrated lattice model: inputs are calibrated, then combined + # non-linearly using a lattice layer. + model_config = configs.CalibratedLatticeConfig( + feature_configs=feature_configs, + regularizer_configs=[ + # Torsion regularizer applied to the lattice to make it more linear. + configs.RegularizerConfig(name='torsion', l2=1e-4), + # Globally defined calibration regularizer is applied to all features.
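+ # (Per-feature regularizer_configs in the feature configs above apply + # only to their own feature.)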
+
+  # This is a calibrated lattice model: inputs are calibrated, then combined
+  # non-linearly using a lattice layer.
+  model_config = configs.CalibratedLatticeConfig(
+      feature_configs=feature_configs,
+      regularizer_configs=[
+          # Torsion regularizer applied to the lattice to make it more linear.
+          configs.RegularizerConfig(name='torsion', l2=1e-4),
+          # Globally defined calibration regularizer is applied to all
+          # features.
+          configs.RegularizerConfig(name='calib_hessian', l2=1e-4),
+      ])
+  estimator = estimators.CannedClassifier(
+      feature_columns=feature_columns,
+      model_config=model_config,
+      feature_analysis_input_fn=feature_analysis_input_fn,
+      optimizer=tf.keras.optimizers.Adam(FLAGS.learning_rate))
+  estimator.train(input_fn=train_input_fn)
+  results = estimator.evaluate(input_fn=test_input_fn)
+  print('Calibrated lattice results: {}'.format(results))
+  print('Calibrated lattice model exported to {}'.format(
+      estimator.export_saved_model(estimator.model_dir, serving_input_fn)))
+
+  # This is a random lattice ensemble model with separate calibration: the
+  # model output is the average output of separately calibrated lattices.
+  model_config = configs.CalibratedLatticeEnsembleConfig(
+      feature_configs=feature_configs,
+      num_lattices=6,
+      lattice_rank=5,
+      separate_calibrators=True,
+      regularizer_configs=[
+          # Torsion regularizer applied to the lattice to make it more linear.
+          configs.RegularizerConfig(name='torsion', l2=1e-4),
+          # Globally defined calibration regularizer is applied to all
+          # features.
+          configs.RegularizerConfig(name='calib_hessian', l2=1e-4),
+      ])
+  configs.apply_updates(model_config, config_updates)
+  estimator = estimators.CannedClassifier(
+      feature_columns=feature_columns,
+      model_config=model_config,
+      feature_analysis_input_fn=feature_analysis_input_fn,
+      optimizer=tf.keras.optimizers.Adam(FLAGS.learning_rate))
+  estimator.train(input_fn=train_input_fn)
+  results = estimator.evaluate(input_fn=test_input_fn)
+  print('Random ensemble results: {}'.format(results))
+  print('Random ensemble model exported to {}'.format(
+      estimator.export_saved_model(estimator.model_dir, serving_input_fn)))
+
+  # This is a Crystals ensemble model with separate calibration: the model
+  # output is the average output of separately calibrated lattices.
+  # The Crystals algorithm first trains a prefitting model and uses the
+  # interactions between features to form the final lattice ensemble.
+  model_config = configs.CalibratedLatticeEnsembleConfig(
+      feature_configs=feature_configs,
+      # Using Crystals algorithm.
+      lattices='crystals',
+      num_lattices=6,
+      lattice_rank=5,
+      separate_calibrators=True,
+      regularizer_configs=[
+          # Torsion regularizer applied to the lattice to make it more linear.
+          configs.RegularizerConfig(name='torsion', l2=1e-4),
+          # Globally defined calibration regularizer is applied to all
+          # features.
+          configs.RegularizerConfig(name='calib_hessian', l2=1e-4),
+      ])
+  configs.apply_updates(model_config, config_updates)
+  estimator = estimators.CannedClassifier(
+      feature_columns=feature_columns,
+      model_config=model_config,
+      feature_analysis_input_fn=feature_analysis_input_fn,
+      # prefitting_input_fn is required to train the prefitting model.
+      prefitting_input_fn=prefitting_input_fn,
+      optimizer=tf.keras.optimizers.Adam(FLAGS.learning_rate))
+  estimator.train(input_fn=train_input_fn)
+  results = estimator.evaluate(input_fn=test_input_fn)
+  print('Crystals ensemble results: {}'.format(results))
+  print('Crystals ensemble model exported to {}'.format(
+      estimator.export_saved_model(estimator.model_dir, serving_input_fn)))
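+
+  # Editorial back-of-envelope (assuming the default lattice size of 2
+  # vertices per dimension): each rank-5 lattice stores 2**5 = 32 weights, so
+  # the 6-lattice ensembles above hold roughly 6 * 32 = 192 lattice
+  # parameters, which is what keeps these models compact and fast to evaluate.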
+
+
+if __name__ == '__main__':
+  app.run(main)
diff --git a/examples/coffee_test.py b/examples/coffee_test.py
deleted file mode 100644
index c465fb3..0000000
--- a/examples/coffee_test.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Tests for lattice estimators."""
-import numpy as np
-import tensorflow as tf
-import tensorflow_lattice as tfl
-
-# Example training and testing data.
-train_features = {
-    'distance': np.array([1.0, 2.0, 3.0, 4.0, 5.0]),
-    'quality': np.array([2.0, 5.0, 1.0, 2.0, 5.0]),
-}
-train_labels = np.array([0.2, 1.0, 0.0, 0.0, 1.0])
-
-# Same quality but different distance.
-test_features = {
-    'distance': np.array([5.0, 10.0]),
-    'quality': np.array([3.0, 3.0]),
-}
-
-# Feature definition.
-feature_columns = [
-    tf.feature_column.numeric_column('distance'),
-    tf.feature_column.numeric_column('quality'),
-]
-
-# Hyperparameters.
-num_keypoints = 10
-hparams = tfl.CalibratedLatticeHParams(
-    feature_names=['distance', 'quality'],
-    num_keypoints=num_keypoints,
-    learning_rate=0.1,
-)
-
-# Set feature monotonicity.
-hparams.set_feature_param('distance', 'monotonicity', -1)
-hparams.set_feature_param('quality', 'monotonicity', +1)
-
-# Define keypoint init.
-keypoints_init_fns = {
-    'distance': lambda: tfl.uniform_keypoints_for_signal(num_keypoints,
-                                                         input_min=0.0,
-                                                         input_max=10.0,
-                                                         output_min=0.0,
-                                                         output_max=1.0),
-    'quality': lambda: tfl.uniform_keypoints_for_signal(num_keypoints,
-                                                        input_min=0.0,
-                                                        input_max=5.0,
-                                                        output_min=0.0,
-                                                        output_max=1.0),
-}
-
-lattice_estimator = tfl.calibrated_lattice_regressor(
-    feature_columns=feature_columns,
-    hparams=hparams,
-    keypoints_initializers_fn=keypoints_init_fns)
-
-# Train!
-train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
-    x=train_features,
-    y=train_labels,
-    batch_size=1,
-    num_epochs=100,
-    shuffle=False)
-
-lattice_estimator.train(input_fn=train_input_fn)
-
-# Test.
-test_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
-    x=test_features, y=None, batch_size=1, num_epochs=1, shuffle=False)
-
-print(list(lattice_estimator.predict(input_fn=test_input_fn)))
diff --git a/examples/custom_estimators_uci_heart.py b/examples/custom_estimators_uci_heart.py
new file mode 100644
index 0000000..ac3e5e3
--- /dev/null
+++ b/examples/custom_estimators_uci_heart.py
@@ -0,0 +1,170 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Example usage of TFL layers in custom estimators.
+
+This example trains a TFL custom estimator on the UCI heart dataset.
+
+Example usage:
+custom_estimators_uci_heart --num_epochs=40
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from absl import app
+from absl import flags
+import numpy as np
+import pandas as pd
+import tensorflow as tf
+from tensorflow import feature_column as fc
+import tensorflow_lattice as tfl
+from tensorflow_estimator.python.estimator.canned import optimizers
+from tensorflow_estimator.python.estimator.head import binary_class_head
+
+FLAGS = flags.FLAGS
+flags.DEFINE_float('learning_rate', 0.01, 'Learning rate.')
+flags.DEFINE_integer('batch_size', 100, 'Batch size.')
+flags.DEFINE_integer('num_epochs', 200, 'Number of training epochs.')
+
+
+def main(_):
+  # UCI Statlog (Heart) dataset.
+  csv_file = tf.keras.utils.get_file(
+      'heart.csv', 'http://storage.googleapis.com/applied-dl/heart.csv')
+  df = pd.read_csv(csv_file)
+  target = df.pop('target')
+  train_size = int(len(df) * 0.8)
+  train_x = df[:train_size]
+  train_y = target[:train_size]
+  test_x = df[train_size:]
+  test_y = target[train_size:]
+
+  train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
+      x=train_x,
+      y=train_y,
+      shuffle=True,
+      batch_size=FLAGS.batch_size,
+      num_epochs=FLAGS.num_epochs,
+      num_threads=1)
+
+  test_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
+      x=test_x,
+      y=test_y,
+      shuffle=False,
+      batch_size=FLAGS.batch_size,
+      num_epochs=FLAGS.num_epochs,
+      num_threads=1)
+
+  # Feature columns.
+  # - age
+  # - sex
+  # - cp        chest pain type (4 values)
+  # - trestbps  resting blood pressure
+  # - chol      serum cholesterol in mg/dl
+  # - fbs       fasting blood sugar > 120 mg/dl
+  # - restecg   resting electrocardiographic results (values 0,1,2)
+  # - thalach   maximum heart rate achieved
+  # - exang     exercise induced angina
+  # - oldpeak   ST depression induced by exercise relative to rest
+  # - slope     the slope of the peak exercise ST segment
+  # - ca        number of major vessels (0-3) colored by fluoroscopy
+  # - thal      3 = normal; 6 = fixed defect; 7 = reversible defect
+  feature_columns = [
+      fc.numeric_column('age', default_value=-1),
+      fc.categorical_column_with_vocabulary_list('sex', [0, 1]),
+      fc.numeric_column('ca'),
+      fc.categorical_column_with_vocabulary_list(
+          'thal', ['normal', 'fixed', 'reversible']),
+  ]
+
+  def model_fn(features, labels, mode, config):
+    """model_fn for the custom estimator."""
+    del config
+    input_tensors = tfl.estimators.transform_features(features, feature_columns)
+    inputs = {
+        key: tf.keras.layers.Input(shape=(1,), name=key)
+        for key in input_tensors
+    }
+
+    lattice_sizes = [3, 2, 2, 2]
+    lattice_monotonicities = ['increasing', 'none', 'increasing', 'increasing']
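+    # Editorial note (a reading of the Lattice layer contract, added for
+    # clarity): dimension d of the lattice expects inputs in
+    # [0, lattice_sizes[d] - 1], i.e. [0, 2], [0, 1], [0, 1], [0, 1] here,
+    # which is why every calibrator below derives its output_max from
+    # lattice_sizes.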
+    lattice_input = tf.keras.layers.Concatenate(axis=1)([
+        tfl.layers.PWLCalibration(
+            input_keypoints=np.linspace(10, 100, num=8, dtype=np.float32),
+            # The output range of the calibrator should be the input range of
+            # the following lattice dimension.
+            output_min=0.0,
+            output_max=lattice_sizes[0] - 1.0,
+            monotonicity='increasing',
+        )(inputs['age']),
+        tfl.layers.CategoricalCalibration(
+            # Number of categories including any missing/default category.
+            num_buckets=2,
+            output_min=0.0,
+            output_max=lattice_sizes[1] - 1.0,
+        )(inputs['sex']),
+        tfl.layers.PWLCalibration(
+            input_keypoints=[0.0, 1.0, 2.0, 3.0],
+            output_min=0.0,
+            output_max=lattice_sizes[2] - 1.0,
+            # You can specify TFL regularizers as a tuple
+            # ('regularizer name', l1, l2).
+            kernel_regularizer=('hessian', 0.0, 1e-4),
+            monotonicity='increasing',
+        )(inputs['ca']),
+        tfl.layers.CategoricalCalibration(
+            num_buckets=3,
+            output_min=0.0,
+            output_max=lattice_sizes[3] - 1.0,
+            # Categorical monotonicity can be a partial order.
+            # (i, j) indicates that we must have output(i) <= output(j).
+            # Make sure to set the corresponding lattice dimension's
+            # monotonicity to 'increasing'.
+            monotonicities=[(0, 1), (0, 2)],
+        )(inputs['thal']),
+    ])
+    output = tfl.layers.Lattice(
+        lattice_sizes=lattice_sizes, monotonicities=lattice_monotonicities)(
+            lattice_input)
+
+    training = (mode == tf.estimator.ModeKeys.TRAIN)
+    model = tf.keras.Model(inputs=inputs, outputs=output)
+    logits = model(input_tensors, training=training)
+
+    if training:
+      optimizer = optimizers.get_optimizer_instance_v2('Adam',
+                                                       FLAGS.learning_rate)
+    else:
+      optimizer = None
+
+    head = binary_class_head.BinaryClassHead()
+    return head.create_estimator_spec(
+        features=features,
+        mode=mode,
+        labels=labels,
+        optimizer=optimizer,
+        logits=logits,
+        trainable_variables=model.trainable_variables,
+        update_ops=model.updates)
+
+  estimator = tf.estimator.Estimator(model_fn=model_fn)
+  estimator.train(input_fn=train_input_fn)
+  results = estimator.evaluate(input_fn=test_input_fn)
+  print('Results: {}'.format(results))
+
+
+if __name__ == '__main__':
+  app.run(main)
diff --git a/examples/estimator_test.py b/examples/estimator_test.py
deleted file mode 100644
index f00b063..0000000
--- a/examples/estimator_test.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""A quick test script for TensorFlow Lattice's calibrated RTL estimator."""
-import numpy as np
-
-import tensorflow as tf
-import tensorflow_lattice as tfl
-
-# Feature definition.
-feature_columns = [
-    tf.feature_column.numeric_column('x0'),
-    tf.feature_column.numeric_column('x1'),
-]
-
-# Hyperparameters.
-num_keypoints = 10
-hparams = tfl.CalibratedRtlHParams(
-    num_keypoints=num_keypoints,
-    num_lattices=5,
-    lattice_rank=2,
-    learning_rate=0.1)
-def init_fn():
-  return tfl.uniform_keypoints_for_signal(num_keypoints,
-                                          input_min=-1.0,
-                                          input_max=1.0,
-                                          output_min=0.0,
-                                          output_max=1.0)
-
-# Estimator.
-rtl_estimator = tfl.calibrated_rtl_regressor(feature_columns=feature_columns,
                                             hparams=hparams,
                                             keypoints_initializers_fn=init_fn)
-
-# Prepare the dataset.
-num_examples = 1000
-x0 = np.random.uniform(-1.0, 1.0, size=num_examples)
-x1 = np.random.uniform(-1.0, 1.0, size=num_examples)
-y = x0 ** 2 + x1 ** 2
-
-# Example input function.
-twod_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
-    x={
-        'x0': x0,
-        'x1': x1
-    }, y=y, batch_size=10, num_epochs=1, shuffle=False)
-
-# Train!
-rtl_estimator.train(input_fn=twod_input_fn)
-# Evaluate!
-print(rtl_estimator.evaluate(input_fn=twod_input_fn)) diff --git a/examples/etl_1d.py b/examples/etl_1d.py deleted file mode 100644 index 28ef2a1..0000000 --- a/examples/etl_1d.py +++ /dev/null @@ -1,310 +0,0 @@ -# Copyright 2018 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Trains a small (2 inputs, single lattice) on toy data and visualizes it.""" -from __future__ import print_function - -import tempfile -import matplotlib.pyplot as plt -import numpy as np -import scipy -import tensorflow as tf -import tensorflow_lattice as tfl - -np.random.seed(1) - -_FEATURE_KEYPOINTS = 'tfl_calibrated_etl/pwl_calibration/X_{}_keypoints_' -_EMBED_KEYPOINTS = 'tfl_calibrated_etl/non_monotonic_lattices/' -_EMBED_KEYPOINTS += 'pwl_calibration/signal_{}_keypoints_' -_LATTICE_PARAMS = 'tfl_calibrated_etl/non_monotonic_lattices/lattice_{}/' -_LATTICE_PARAMS += 'hypercube_lattice_parameters' - - -def annulus_data(n_points, r_0, r_1): - """Creates toy dataset in quadrant I with a quarter annulus. - - Args: - n_points: (int) number of points - r_0: (float) inner bounding radius - r_1: (float) outer bounding radius - - Returns: - x: (np.Array) covariates - y: (np.Array) labels - """ - x = np.random.random(size=(n_points, 2)) - r = (x**2).sum(1)**.5 - y = (r_0 < r) & (r < r_1) - return x, y.astype(int) - - -def fit_model(x, - y, - lattice_size=5, - non_monotonic_num_lattices=1, - non_monotonic_lattice_rank=1): - """Fits a single 1D lattice to the provided data. - - Args: - x: covariates - y: labels - lattice_size: (int, optional) Number of knots in each lattice dimension, - total knots is lattice_size^lattice_rank, for each lattice - non_monotonic_num_lattices: (int, optional) - non_monotonic_lattice_rank: (int, optional) number of inputs to each - - Returns: - etl_estimator: fitted TF Estimator - """ - # Hyperparameters. - num_keypoints = 100 - hparams = tfl.CalibratedEtlHParams( - non_monotonic_lattice_rank=non_monotonic_lattice_rank, - non_monotonic_num_lattices=non_monotonic_num_lattices, - non_monotonic_lattice_size=lattice_size, - num_keypoints=num_keypoints, - learning_rate=0.007, - linear_embedding_calibration_num_keypoints=100) - - # Estimator. - feature_columns = [ - tf.feature_column.numeric_column('X_0'), - tf.feature_column.numeric_column('X_1'), - ] - - # Training is sensitive to initialization - config = tf.estimator.RunConfig(tf_random_seed=1) - def keypoints_config(): - return tfl.uniform_keypoints_for_signal( - num_keypoints, - input_min=0.0, - input_max=x.max(), - output_min=0.0, - output_max=lattice_size - 1 - ) - etl_estimator = tfl.calibrated_etl_classifier( - feature_columns=feature_columns, - hparams=hparams, - keypoints_initializers_fn=keypoints_config, - config=config - ) - - # Input function. 
- input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x={ - 'X_0': x[:, 0], - 'X_1': x[:, 1] - }, - y=y.flatten(), - batch_size=10000, - num_epochs=100, - shuffle=False) - - # Train! - etl_estimator.train(input_fn=input_fn) - - # Evaluate - eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x={ - 'X_0': x[:, 0], - 'X_1': x[:, 1] - }, - y=y.flatten(), - batch_size=10000, - num_epochs=1, - shuffle=False) - print(etl_estimator.evaluate(input_fn=eval_input_fn)) - - return etl_estimator - - -def _get_calibration_params(estimator, dim, weight_key, prefix): - """Helps extract calibration parameters from TFL graph.""" - input_key = '{}_keypoints_inputs'.format(prefix) - output_key = '{}_keypoints_outputs'.format(prefix) - calibrator_key = '{}_calibrators'.format(prefix) - - params = {} - params[input_key], params[output_key], params[calibrator_key] = [], [], [] - for i in xrange(dim): - params[input_key].append( - estimator.get_variable_value(weight_key.format(i) + 'inputs')) - params[output_key].append( - estimator.get_variable_value(weight_key.format(i) + 'outputs')) - params[calibrator_key].append( - scipy.interpolate.interp1d( - params[input_key][-1], - params[output_key][-1], - fill_value='extrapolate')) - return params - - -def _get_parameters(etl_estimator): - """Extracts all parameters necessary to evaluate an ETL from estimator.""" - params = {} - params['embed_weighting'] = etl_estimator.get_variable_value( - 'tfl_calibrated_etl/linear_embedding/split_non_monotone/monotone_linear' - '/weight') - params['embed_bias'] = etl_estimator.get_variable_value( - 'tfl_calibrated_etl/linear_embedding/split_non_monotone/monotone_linear' - '/bias') - params['final_bias'] = etl_estimator.get_variable_value( - 'tfl_calibrated_etl/ensemble_average/ensemble_bias') - params['n_embed'] = params['embed_weighting'].shape[0] - params['n_feature'] = params['embed_weighting'].shape[1] - - params.update( - _get_calibration_params(etl_estimator, params['n_feature'], - _FEATURE_KEYPOINTS, 'feature')) - - params.update( - _get_calibration_params( - etl_estimator, - params['n_embed'], - _EMBED_KEYPOINTS, - 'embed', - )) - - n, ws = 0, [] - while _LATTICE_PARAMS.format(n) in etl_estimator.get_variable_names(): - ws.append(etl_estimator.get_variable_value(_LATTICE_PARAMS.format(n))) - n += 1 - params['lattice_knots'] = np.vstack(ws) - - return params - - -def _apply_callibration(x, calibrators): - x_ = x.copy() - for n in xrange(x.shape[1]): - x_[:, n] = calibrators[n](x[:, n]) - return x_ - - -def _compress_0_1(x): - return (x - x.min()) / (x.max() - x.min()) - - -def plot_all(etl_estimator, x, y, save_dir): - """Makes visualizations of ETL Estimator. 
- - Args: - etl_estimator: (TF ETL Estimator) - x: (np.Array) inputs - y: (np.Array) labels, in [0, 1] - save_dir: (string) directory for saving visualizations - """ - params = _get_parameters(etl_estimator) - - x_cal = _apply_callibration(x, params['feature_calibrators']) - x_cal_emb = x_cal.dot(params['embed_weighting'].T) + params['embed_bias'] - x_cal_emb_cal = _apply_callibration(x_cal_emb, params['embed_calibrators']) - x_cal_emb_cal_lat = np.zeros_like(x_cal_emb_cal) - for i in xrange(params['lattice_knots'].shape[0]): - interpolator = scipy.interpolate.interp1d( - range(params['lattice_knots'].shape[1]), - params['lattice_knots'][i], - fill_value='extrapolate') - x_cal_emb_cal_lat[:, i] = interpolator(x_cal_emb_cal[:, i]) - - predictions = (x_cal_emb_cal_lat.mean(1) + params['final_bias'] > - .5).astype(int) - - plt.figure() - plt.title('Input Points Colored By Correct Classification') - plt.scatter(x[:10000, 0], x[:10000, 1], c=y[:10000], alpha=.3) - plt.savefig(save_dir + '/labeled.png') - - for i, (inputs, outputs) in enumerate( - zip(params['feature_keypoints_inputs'], - params['feature_keypoints_outputs'])): - plt.figure() - plt.title('Calibration Keypoints For Input Column Number {}'.format(i)) - plt.scatter(inputs, outputs) - plt.savefig(save_dir + '/feature_cal_{}.png'.format(i)) - - for i, (inputs, outputs) in enumerate( - zip(params['embed_keypoints_inputs'], params['embed_keypoints_outputs'])): - plt.figure() - plt.title('Calibration Keypoints For Emedding Number {}'.format(i)) - plt.scatter(inputs, outputs) - plt.savefig(save_dir + '/embed_cal_{}.png'.format(i)) - - for i in xrange(params['lattice_knots'].shape[0]): - plt.figure() - plt.title('Lattice knots for lattice number {}'.format(i)) - plt.plot( - range(params['lattice_knots'].shape[1]), params['lattice_knots'][i]) - plt.savefig(save_dir + '/lattice_{}.png'.format(i)) - - plt.figure() - plt.title('Input Points After Calibration, Colored By Correct Classification') - plt.scatter(x_cal[:10000, 0], x_cal[:10000, 1], c=y[:10000], alpha=.3) - plt.savefig(save_dir + '/calibrated.png') - - plt.figure() - plt.title('Input Points Colored By Value' - ' After Calibration and linear transformation') - plt.scatter( - x[:10000, 0], - x[:10000, 1], - c=_compress_0_1(x_cal_emb[:10000, 0]), - alpha=.3) - plt.savefig(save_dir + '/embed_colored.png') - - plt.figure() - plt.title('Input Points Colored By Value After Calibration,' - '\n Linear Transformation, Second Calibration') - plt.scatter( - x[:10000, 0], - x[:10000, 1], - c=_compress_0_1(x_cal_emb_cal[:10000, 0]), - alpha=.3) - plt.savefig(save_dir + '/embed_calibrated_colored.png') - - plt.figure() - plt.title('Input Points Colored by Value After Calibration,' - '\nlinear transformation, second calibration, and 1D lattice') - plt.scatter( - x[:10000, 0], - x[:10000, 1], - c=_compress_0_1(x_cal_emb_cal_lat[:10000, 0]), - alpha=.3) - plt.savefig(save_dir + '/lattice_colored.png') - - plt.figure() - plt.title('Predictions') - plt.scatter( - x[:10000, 0], - x[:10000, 1], - c=_compress_0_1(predictions)[:10000], - alpha=.3) - plt.savefig(save_dir + '/predictions.png') - - -def main(): - # Make data - x, y = annulus_data(300000, .5, .8) - - # Train model - etl_estimator = fit_model(x, y) - - # Visualize - temp_dir = tempfile.mkdtemp() - print('Saving figures to {}'.format(temp_dir)) - plot_all(etl_estimator, x, y, temp_dir) - - -if __name__ == '__main__': - main() diff --git a/examples/image_compression.py b/examples/image_compression.py deleted file mode 100644 index 
02e7885..0000000 --- a/examples/image_compression.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright 2018 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""A quick example of TensorFlow Lattice's calibrated RTL estimator.""" -from __future__ import print_function -import sys -import tempfile -import matplotlib.pyplot as plt -import numpy as np -import tensorflow as tf -import tensorflow_lattice as tfl - - -def _pixels(im): - out = np.zeros((im.shape[0] * im.shape[1], 3)) - out[:, 0] = np.repeat(np.arange(im.shape[0]), im.shape[1]) - out[:, 1] = np.tile(np.arange(im.shape[1]), im.shape[0]) - out[:, 2] = im.ravel() - return out - - -def _pixels_to_image(pixels): - out = np.zeros((int(pixels[:, 0].max() + 1), int(pixels[:, 1].max() + 1))) - out[pixels[:, 0].astype(int), pixels[:, 1].astype(int)] = pixels[:, 2] - return out - - -def run_image(image_path, lattice_size=35): - """Reads image and fits a 2D lattice to compress it.""" - im = plt.imread(image_path)[:, :, 2] - im_pixels = _pixels(im) - - print('compression ratio is ', lattice_size**2 / float(im.size)) - - # Hyperparameters. - num_keypoints = 2 - hparams = tfl.CalibratedRtlHParams( - num_keypoints=num_keypoints, - num_lattices=1, - lattice_rank=2, - learning_rate=0.003, - lattice_size=lattice_size) - - # Estimator. - # input: coordinate of the pixel - # output: value of the pixel - feature_columns = [ - tf.feature_column.numeric_column('pixel_x'), - tf.feature_column.numeric_column('pixel_y'), - ] - - def keypoints_initializers(): - return tfl.uniform_keypoints_for_signal( - num_keypoints, - input_min=0.0, - input_max=im_pixels.max(), - output_min=0.0, - output_max=lattice_size - 1 - ) - rtl_estimator = tfl.calibrated_rtl_regressor( - feature_columns=feature_columns, - hparams=hparams, - keypoints_initializers_fn=keypoints_initializers - ) - - # Example input function. - input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x={ - 'pixel_x': im_pixels[:, 0], - 'pixel_y': im_pixels[:, 1] - }, - y=im_pixels[:, 2], - batch_size=5000, - num_epochs=15, - shuffle=True) - - # Train! - rtl_estimator.train(input_fn=input_fn) - - # Evaluate! 
-  eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
-      x={
-          'pixel_x': im_pixels[:, 0],
-          'pixel_y': im_pixels[:, 1]
-      },
-      y=im_pixels[:, 2],
-      batch_size=5000,
-      num_epochs=1,
-      shuffle=True)
-  print(rtl_estimator.evaluate(input_fn=eval_input_fn))
-
-  return rtl_estimator
-
-
-def visualize(estimator, input_img_path, output_dir):
-  """Visualizes trained estimator."""
-  # This example pulls one channel, also would make sense to convert to gray
-  im = plt.imread(input_img_path)[:, :, 2]
-  im_pixels = _pixels(im)
-
-  input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
-      x={
-          'pixel_x': im_pixels[:, 0],
-          'pixel_y': im_pixels[:, 1]
-      },
-      batch_size=10000,
-      num_epochs=1,
-      shuffle=False)
-
-  y_test = np.array(
-      [q['predictions'] for q in estimator.predict(input_fn=input_fn)])
-  img = _pixels_to_image(np.c_[im_pixels[:, :2], y_test])
-
-  plt.figure()
-  plt.imshow(img, cmap='gray')
-  plt.savefig(output_dir + '/image.png')
-  return img
-
-
-def main(image_path):
-  """Fits image and provides visualization."""
-  temp_dir = tempfile.mkdtemp()
-  print('Saving output to {}'.format(temp_dir))
-  estimator = run_image(image_path)
-  visualize(estimator, image_path, temp_dir)
-
-if __name__ == '__main__':
-  input_image_path = sys.argv[1]
-  main(input_image_path)
diff --git a/examples/keras_functional_uci_heart.py b/examples/keras_functional_uci_heart.py
new file mode 100644
index 0000000..252616e
--- /dev/null
+++ b/examples/keras_functional_uci_heart.py
@@ -0,0 +1,314 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Example usage of TFL within the Keras Functional API.
+
+This example builds and trains a calibrated lattice model for the UCI heart
+dataset.
+
+"Calibrated lattice" is a commonly used architecture for datasets where the
+number of input features does not exceed ~15.
+
+A calibrated lattice model transforms every feature with a PWLCalibration or
+CategoricalCalibration layer before nonlinearly fusing the calibration results
+within a lattice layer.
+
+The TFL package does not have any layers dedicated to processing sparse
+features, but thanks to plug-and-play compatibility with any other Keras
+layers we can take advantage of a standard Keras embedding to handle them.
+The UCI Heart dataset does not have any sparse features, so for this example
+we replaced the PWLCalibration layer for the feature 'age' with an Embedding
+layer in order to demonstrate such compatibility, as well as the advantage of
+monotonicity constraints for semantically meaningful features.
+
+Generally, when you manually combine TFL layers you should keep track of:
+1) Ensuring that inputs to TFL layers are within the expected range.
+   - The input range for a PWLCalibration layer is defined by the smallest
+     and largest of the provided keypoints.
+   - The input range for a Lattice layer is [0.0, lattice_sizes[d] - 1.0] for
+     any dimension d.
+   TFL layers can constrain their output to be within a desired range.
When
+   feeding the output of other layers into TFL layers, you might want to
+   ensure that something like a sigmoid is used to constrain their output
+   range.
+2) Properly configuring monotonicity. If your calibration layer is monotonic,
+   then the corresponding dimension of the lattice layer should also be
+   monotonic.
+
+This example uses the functional API for Keras model construction. For an
+example of sequential models with TFL layers see keras_sequential_uci_heart.py.
+
+In order to see how better generalization can be achieved with a properly
+constrained PWLCalibration layer compared to a vanilla embedding layer,
+compare the training and validation losses of this model with the one defined
+in keras_sequential_uci_heart.py.
+
+Note that the specifics of layer configurations are for demonstration purposes
+and might not result in optimal performance.
+
+Example usage:
+keras_functional_uci_heart
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from absl import app
+from absl import flags
+
+import numpy as np
+import pandas as pd
+
+import tensorflow as tf
+from tensorflow import keras
+import tensorflow_lattice as tfl
+
+FLAGS = flags.FLAGS
+flags.DEFINE_integer('num_epochs', 200, 'Number of training epochs.')
+
+
+def main(_):
+  # UCI Statlog (Heart) dataset.
+  csv_file = tf.keras.utils.get_file(
+      'heart.csv', 'http://storage.googleapis.com/applied-dl/heart.csv')
+  training_data_df = pd.read_csv(csv_file).sample(
+      frac=1.0, random_state=41).reset_index(drop=True)
+
+  # Feature columns.
+  # 0  age
+  # 1  sex
+  # 2  cp        chest pain type (4 values)
+  # 3  trestbps  resting blood pressure
+  # 4  chol      serum cholesterol in mg/dl
+  # 5  fbs       fasting blood sugar > 120 mg/dl
+  # 6  restecg   resting electrocardiographic results (values 0,1,2)
+  # 7  thalach   maximum heart rate achieved
+  # 8  exang     exercise induced angina
+  # 9  oldpeak   ST depression induced by exercise relative to rest
+  # 10 slope     the slope of the peak exercise ST segment
+  # 11 ca        number of major vessels (0-3) colored by fluoroscopy
+  # 12 thal      3 = normal; 6 = fixed defect; 7 = reversible defect
+
+  # Example slice of training data:
+  #     age  sex  cp  trestbps  chol  fbs  restecg  thalach  exang  oldpeak
+  # 0    63    1   1       145   233    1        2      150      0      2.3
+  # 1    67    1   4       160   286    0        2      108      1      1.5
+  # 2    67    1   4       120   229    0        2      129      1      2.6
+  # 3    37    1   3       130   250    0        0      187      0      3.5
+  # 4    41    0   2       130   204    0        2      172      0      1.4
+  # 5    56    1   2       120   236    0        0      178      0      0.8
+  # 6    62    0   4       140   268    0        2      160      0      3.6
+  # 7    57    0   4       120   354    0        0      163      1      0.6
+  # 8    63    1   4       130   254    0        2      147      0      1.4
+  # 9    53    1   4       140   203    1        2      155      1      3.1
+
+  model_inputs = []
+  lattice_inputs = []
+  # We are going to have a 2-d embedding as one of the lattice inputs.
+  lattice_sizes_for_embedding = [2, 3]
+  lattice_sizes = lattice_sizes_for_embedding + [2, 2, 3, 3, 2, 2]
+
+  # ############### age ###############
+
+  age_input = keras.layers.Input(shape=[1])
+  model_inputs.append(age_input)
+  age_embedding = keras.layers.Embedding(
+      input_dim=10,
+      output_dim=len(lattice_sizes_for_embedding),
+      embeddings_initializer=keras.initializers.RandomNormal(seed=1)
+  )(age_input)
+  # Flatten to get rid of the redundant tensor dimension created by the
+  # embedding layer.
+  age_embedding = keras.layers.Flatten()(age_embedding)
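+
+  # Editorial aside: an equivalent one-liner for the range scaling performed
+  # below (a sketch, not part of the original example; the [1.0, 2.0] factors
+  # are lattice_sizes_for_embedding minus one) would be a Lambda layer that
+  # applies the sigmoid and rescales in one step:
+  #   age_ranged = keras.layers.Lambda(
+  #       lambda t: tf.sigmoid(t) * tf.constant([[1.0, 2.0]]))(age_embedding)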
+
+  # Lattice expects input data for lattice dimension d to be within
+  # [0, lattice_sizes[d] - 1.0]. Apply sigmoid and multiply it by the input
+  # range to ensure that lattice inputs are within the expected range.
+  embedding_lattice_input_range = tf.constant(
+      [size - 1.0 for size in lattice_sizes_for_embedding],
+      # Insert dimension of size 1 in front to ensure that batch dimension
+      # will not collapse as result of multiplication.
+      shape=(1, 2))
+  age_ranged = keras.layers.multiply(
+      [keras.activations.sigmoid(age_embedding),
+       embedding_lattice_input_range])
+  lattice_inputs.append(age_ranged)
+
+  # ############### sex ###############
+
+  # For boolean features simply specify CategoricalCalibration layer with 2
+  # buckets.
+  sex_input = keras.layers.Input(shape=[1])
+  model_inputs.append(sex_input)
+  sex_calibrator = tfl.layers.CategoricalCalibration(
+      num_buckets=2,
+      output_min=0.0,
+      output_max=lattice_sizes[2] - 1.0,
+      # Initializes all outputs to (output_min + output_max) / 2.0.
+      kernel_initializer='constant',
+  )(sex_input)
+  lattice_inputs.append(sex_calibrator)
+
+  # ############### cp ###############
+
+  cp_input = keras.layers.Input(shape=[1])
+  model_inputs.append(cp_input)
+  cp_calibrator = tfl.layers.PWLCalibration(
+      # Here instead of specifying dtype of layer we convert keypoints into
+      # np.float32.
+      input_keypoints=np.linspace(1, 4, num=4, dtype=np.float32),
+      output_min=0.0,
+      output_max=lattice_sizes[3] - 1.0,
+      monotonicity='increasing',
+      # You can specify TFL regularizers as a tuple
+      # ('regularizer name', l1, l2).
+      kernel_regularizer=('hessian', 0.0, 1e-4)
+  )(cp_input)
+  lattice_inputs.append(cp_calibrator)
+
+  # ############### trestbps ###############
+
+  trestbps_input = keras.layers.Input(shape=[1])
+  model_inputs.append(trestbps_input)
+  trestbps_calibrator = tfl.layers.PWLCalibration(
+      # As an alternative to uniform keypoints you might want to use quantiles
+      # as keypoints.
+      input_keypoints=np.quantile(
+          training_data_df['trestbps'], np.linspace(0.0, 1.0, num=5)),
+      dtype=tf.float32,
+      # Together with quantile keypoints you might want to initialize the
+      # piecewise linear function to have 'equal_slopes' in order for the
+      # output of the layer after initialization to preserve the original
+      # distribution.
+      kernel_initializer='equal_slopes',
+      output_min=0.0,
+      output_max=lattice_sizes[4] - 1.0,
+      # You might consider clamping extreme inputs of the calibrator to output
+      # bounds.
+      clamp_min=True,
+      clamp_max=True,
+      monotonicity='increasing',
+  )(trestbps_input)
+  lattice_inputs.append(trestbps_calibrator)
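+
+  # Editorial note on clamp_min/clamp_max above (a hedged reading of the
+  # layer's behavior): clamping ties the outputs at the first/last keypoints
+  # to output_min/output_max, so the calibration is forced to span the full
+  # [0, lattice_sizes[4] - 1] range rather than a sub-interval of it.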
+
+  # ############### chol ###############
+
+  chol_input = keras.layers.Input(shape=[1])
+  model_inputs.append(chol_input)
+  chol_calibrator = tfl.layers.PWLCalibration(
+      # Explicit input keypoint initialization.
+      input_keypoints=[126.0, 210.0, 247.0, 286.0, 564.0],
+      output_min=0.0,
+      output_max=lattice_sizes[5] - 1.0,
+      # Monotonicity of the calibrator can be decreasing. Note that the
+      # corresponding lattice dimension must have 'increasing' monotonicity
+      # regardless of the monotonicity direction of the calibrator.
+      # It's not some weird configuration hack. It's just how the math
+      # works :)
+      monotonicity='decreasing',
+      # Convexity together with decreasing monotonicity results in a
+      # diminishing-returns constraint.
+      convexity='convex',
+      # You can specify a list of regularizers. You are not limited to TFL
+      # regularizers. Feel free to use any :)
+      kernel_regularizer=[('laplacian', 0.0, 1e-4),
+                          keras.regularizers.l1_l2(l1=0.001)]
+  )(chol_input)
+  lattice_inputs.append(chol_calibrator)
+
+  # ############### fbs ###############
+
+  fbs_input = keras.layers.Input(shape=[1])
+  model_inputs.append(fbs_input)
+  fbs_calibrator = tfl.layers.CategoricalCalibration(
+      num_buckets=2,
+      output_min=0.0,
+      output_max=lattice_sizes[6] - 1.0,
+      # For the categorical calibration layer monotonicity is specified for
+      # pairs of indices of categories. Output for the first category in a
+      # pair will be smaller than output for the second category.
+      #
+      # Don't forget to set monotonicity of the corresponding dimension of
+      # the Lattice layer to 'increasing'.
+      monotonicities=[(0, 1)],
+      # This initializer is identical to the default one ('uniform'), but has
+      # a fixed seed in order to simplify experimentation.
+      kernel_initializer=keras.initializers.RandomUniform(
+          minval=0.0, maxval=lattice_sizes[6] - 1.0, seed=1),
+  )(fbs_input)
+  lattice_inputs.append(fbs_calibrator)
+
+  # ############### restecg ###############
+
+  restecg_input = keras.layers.Input(shape=[1])
+  model_inputs.append(restecg_input)
+  restecg_calibrator = tfl.layers.CategoricalCalibration(
+      num_buckets=3,
+      output_min=0.0,
+      output_max=lattice_sizes[7] - 1.0,
+      # Categorical monotonicity can be a partial order.
+      monotonicities=[(0, 1), (0, 2)],
+      # Categorical calibration layer supports standard Keras regularizers.
+      kernel_regularizer=keras.regularizers.l1_l2(l1=0.001),
+      kernel_initializer='constant',
+  )(restecg_input)
+  lattice_inputs.append(restecg_calibrator)
+
+  # Lattice inputs must be either a list of d tensors of rank (batch_size, 1)
+  # or a single tensor of rank (batch_size, d) where d is the dimensionality
+  # of the lattice. Since our embedding layer has size 2 in the second
+  # dimension, concatenate all of the inputs to create a single tensor.
+  lattice_inputs_tensor = keras.layers.concatenate(lattice_inputs, axis=1)
+
+  # Create a Lattice layer to nonlinearly fuse the outputs of the calibrators.
+  # Don't forget to specify 'increasing' monotonicity for every dimension for
+  # which monotonicity is configured, regardless of the monotonicity direction
+  # of the corresponding calibrator. This includes partial monotonicity of the
+  # CategoricalCalibration layer.
+  # Note that making embedding inputs monotonic does not make sense.
+  lattice = tfl.layers.Lattice(
+      lattice_sizes=lattice_sizes,
+      monotonicities=['none', 'none', 'none', 'increasing', 'increasing',
+                      'increasing', 'increasing', 'increasing'],
+      output_min=0.0,
+      output_max=1.0,
+  )(lattice_inputs_tensor)
+
+  model = keras.models.Model(
+      inputs=model_inputs,
+      outputs=lattice)
+  model.compile(loss=keras.losses.mean_squared_error,
+                optimizer=keras.optimizers.Adagrad(learning_rate=1.0))
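+
+  # Editorial aside: at this point the graph is fully wired; an optional
+  # summary (added here for illustration) shows the seven model inputs
+  # feeding the single Lattice output.
+  model.summary()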
+
+  feature_names = ['age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg']
+  features = np.split(training_data_df[feature_names].values.astype(np.float32),
+                      indices_or_sections=len(feature_names),
+                      axis=1)
+  target = training_data_df[['target']].values.astype(np.float32)
+
+  # Bucketize input for embedding.
+  embedding_bins = np.quantile(
+      features[0],
+      # 10 bin edges will produce 9 bins numbered 1..9, matching the embedding
+      # input size of 10.
+      np.linspace(0.0, 1.0, num=10, dtype=np.float32))
+  # Ensure that the highest age gets into the last bin rather than its own
+  # one.
+  embedding_bins[-1] += 1.0
+  features[0] = np.digitize(features[0], bins=embedding_bins)
+
+  model.fit(features,
+            target,
+            batch_size=32,
+            epochs=FLAGS.num_epochs,
+            validation_split=0.2,
+            shuffle=False)
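+
+  # Editorial usage sketch (not part of the original example): the trained
+  # model accepts the same list-of-feature-arrays format for inference, e.g.:
+  #   print(model.predict([f[:5] for f in features]))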
+
+
+if __name__ == '__main__':
+  app.run(main)
diff --git a/examples/keras_sequential_uci_heart.py b/examples/keras_sequential_uci_heart.py
new file mode 100644
index 0000000..3c721ec
--- /dev/null
+++ b/examples/keras_sequential_uci_heart.py
@@ -0,0 +1,275 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Example usage of TFL within Keras models.
+
+This example builds and trains a calibrated lattice model for the UCI heart
+dataset.
+
+"Calibrated lattice" is a commonly used architecture for datasets where the
+number of input features does not exceed ~15.
+
+A calibrated lattice model transforms every feature with a PWLCalibration or
+CategoricalCalibration layer before nonlinearly fusing the calibration results
+within a lattice layer.
+
+Generally, when you manually combine TFL layers you should keep track of:
+1) Ensuring that inputs to TFL layers are within the expected range.
+   - The input range for a PWLCalibration layer is defined by the smallest
+     and largest of the provided keypoints.
+   - The input range for a Lattice layer is [0.0, lattice_sizes[d] - 1.0] for
+     any dimension d.
+   TFL layers can constrain their output to be within a desired range. When
+   feeding the output of other layers into TFL layers, you might want to
+   ensure that something like a sigmoid is used to constrain their output
+   range.
+2) Properly configuring monotonicity. If your calibration layer is monotonic,
+   then the corresponding dimension of the lattice layer should also be
+   monotonic.
+
+This example creates a Sequential Keras model and only uses TFL layers. For an
+example of functional model construction that also uses embedding layers see
+keras_functional_uci_heart.py.
+
+In order to see how better generalization can be achieved with a properly
+constrained PWLCalibration layer compared to a vanilla embedding layer,
+compare the training and validation losses of this model with the one defined
+in keras_functional_uci_heart.py.
+
+
+Note that the specifics of layer configurations are for demonstration purposes
+and might not result in optimal performance.
+
+Example usage:
+keras_sequential_uci_heart
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from absl import app
+from absl import flags
+
+import numpy as np
+import pandas as pd
+
+import tensorflow as tf
+from tensorflow import keras
+import tensorflow_lattice as tfl
+
+FLAGS = flags.FLAGS
+flags.DEFINE_integer('num_epochs', 200, 'Number of training epochs.')
+
+
+def main(_):
+  # UCI Statlog (Heart) dataset.
+  csv_file = tf.keras.utils.get_file(
+      'heart.csv', 'http://storage.googleapis.com/applied-dl/heart.csv')
+  training_data_df = pd.read_csv(csv_file).sample(
+      frac=1.0, random_state=41).reset_index(drop=True)
+
+  # Feature columns.
+  # 0  age
+  # 1  sex
+  # 2  cp        chest pain type (4 values)
+  # 3  trestbps  resting blood pressure
+  # 4  chol      serum cholesterol in mg/dl
+  # 5  fbs       fasting blood sugar > 120 mg/dl
+  # 6  restecg   resting electrocardiographic results (values 0,1,2)
+  # 7  thalach   maximum heart rate achieved
+  # 8  exang     exercise induced angina
+  # 9  oldpeak   ST depression induced by exercise relative to rest
+  # 10 slope     the slope of the peak exercise ST segment
+  # 11 ca        number of major vessels (0-3) colored by fluoroscopy
+  # 12 thal      3 = normal; 6 = fixed defect; 7 = reversible defect
+
+  # Example slice of training data:
+  #     age  sex  cp  trestbps  chol  fbs  restecg  thalach  exang  oldpeak
+  # 0    63    1   1       145   233    1        2      150      0      2.3
+  # 1    67    1   4       160   286    0        2      108      1      1.5
+  # 2    67    1   4       120   229    0        2      129      1      2.6
+  # 3    37    1   3       130   250    0        0      187      0      3.5
+  # 4    41    0   2       130   204    0        2      172      0      1.4
+  # 5    56    1   2       120   236    0        0      178      0      0.8
+  # 6    62    0   4       140   268    0        2      160      0      3.6
+  # 7    57    0   4       120   354    0        0      163      1      0.6
+  # 8    63    1   4       130   254    0        2      147      0      1.4
+  # 9    53    1   4       140   203    1        2      155      1      3.1
+
+  # Lattice sizes per dimension for the Lattice layer.
+  # The Lattice layer expects input[i] to be within
+  # [0, lattice_sizes[i] - 1.0], so we need to define lattice sizes ahead of
+  # the calibration layers in order to properly specify the output ranges of
+  # the calibration layers.
+  lattice_sizes = [3, 2, 2, 2, 2, 2, 2]
+
+  # Use the ParallelCombination helper layer to group together calibration
+  # layers which have to be executed in parallel in order to be able to use a
+  # Sequential model. Alternatively, you can use the functional API.
+  combined_calibrators = tfl.layers.ParallelCombination()
+
+  # Configure calibration layers for every feature:
+
+  # ############### age ###############
+
+  calibrator = tfl.layers.PWLCalibration(
+      # Every PWLCalibration layer must have the keypoints of its piecewise
+      # linear function specified. The easiest way to specify them is to
+      # uniformly cover the entire input range with numpy.linspace().
+      input_keypoints=np.linspace(training_data_df['age'].min(),
                                  training_data_df['age'].max(),
                                  num=5),
+      # You need to ensure that input keypoints have the same dtype as the
+      # layer input. You can do it by setting dtype here, or by providing
+      # keypoints in a format which will be converted to the desired tf.dtype
+      # by default.
+      dtype=tf.float32,
+      # Output range must correspond to the expected lattice input range.
+      output_min=0.0,
+      output_max=lattice_sizes[0] - 1.0,
+      monotonicity='increasing')
+  combined_calibrators.append(calibrator)
+
+  # ############### sex ###############
+
+  # For boolean features simply specify CategoricalCalibration layer with 2
+  # buckets.
+  calibrator = tfl.layers.CategoricalCalibration(
+      num_buckets=2,
+      output_min=0.0,
+      output_max=lattice_sizes[1] - 1.0,
+      # Initializes all outputs to (output_min + output_max) / 2.0.
+      kernel_initializer='constant')
+  combined_calibrators.append(calibrator)
+
+  # ############### cp ###############
+
+  calibrator = tfl.layers.PWLCalibration(
+      # Here instead of specifying dtype of layer we convert keypoints into
+      # np.float32.
+      input_keypoints=np.linspace(1, 4, num=4, dtype=np.float32),
+      output_min=0.0,
+      output_max=lattice_sizes[2] - 1.0,
+      monotonicity='increasing',
+      # You can specify TFL regularizers as a tuple
+      # ('regularizer name', l1, l2).
+      kernel_regularizer=('hessian', 0.0, 1e-4))
+  combined_calibrators.append(calibrator)
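+
+  # Editorial note on the ('hessian', 0.0, 1e-4) tuple above: the entries are
+  # (name, l1, l2). Roughly speaking (a hedged summary; see the TFL
+  # regularizer docs for the exact form), the 'hessian' term penalizes the
+  # second differences of the keypoint outputs,
+  #   l2 * sum_k (y[k+1] - 2*y[k] + y[k-1])**2,
+  # nudging the calibration toward a linear function.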
+
+  # ############### trestbps ###############
+
+  calibrator = tfl.layers.PWLCalibration(
+      # As an alternative to uniform keypoints you might want to use quantiles
+      # as keypoints.
+      input_keypoints=np.quantile(
+          training_data_df['trestbps'], np.linspace(0.0, 1.0, num=5)),
+      dtype=tf.float32,
+      # Together with quantile keypoints you might want to initialize the
+      # piecewise linear function to have 'equal_slopes' in order for the
+      # output of the layer after initialization to preserve the original
+      # distribution.
+      kernel_initializer='equal_slopes',
+      output_min=0.0,
+      output_max=lattice_sizes[3] - 1.0,
+      # You might consider clamping extreme inputs of the calibrator to output
+      # bounds.
+      clamp_min=True,
+      clamp_max=True,
+      monotonicity='increasing')
+  combined_calibrators.append(calibrator)
+
+  # ############### chol ###############
+
+  calibrator = tfl.layers.PWLCalibration(
+      # Explicit input keypoint initialization.
+      input_keypoints=[126.0, 210.0, 247.0, 286.0, 564.0],
+      dtype=tf.float32,
+      output_min=0.0,
+      output_max=lattice_sizes[4] - 1.0,
+      # Monotonicity of the calibrator can be 'decreasing'. Note that the
+      # corresponding lattice dimension must have 'increasing' monotonicity
+      # regardless of the monotonicity direction of the calibrator.
+      # It's not some weird configuration hack. It's just how the math
+      # works :)
+      monotonicity='decreasing',
+      # Convexity together with decreasing monotonicity results in a
+      # diminishing-returns constraint.
+      convexity='convex',
+      # You can specify a list of regularizers. You are not limited to TFL
+      # regularizers. Feel free to use any :)
+      kernel_regularizer=[('laplacian', 0.0, 1e-4),
+                          keras.regularizers.l1_l2(l1=0.001)])
+  combined_calibrators.append(calibrator)
+
+  # ############### fbs ###############
+
+  calibrator = tfl.layers.CategoricalCalibration(
+      num_buckets=2,
+      output_min=0.0,
+      output_max=lattice_sizes[5] - 1.0,
+      # For the categorical calibration layer monotonicity is specified for
+      # pairs of indices of categories. Output for the first category in a
+      # pair will be smaller than output for the second category.
+      #
+      # Don't forget to set monotonicity of the corresponding dimension of
+      # the Lattice layer to 'increasing'.
+      monotonicities=[(0, 1)],
+      # This initializer is identical to the default one ('uniform'), but has
+      # a fixed seed in order to simplify experimentation.
+      kernel_initializer=keras.initializers.RandomUniform(
+          minval=0.0, maxval=lattice_sizes[5] - 1.0, seed=1))
+  combined_calibrators.append(calibrator)
+
+  # ############### restecg ###############
+
+  calibrator = tfl.layers.CategoricalCalibration(
+      num_buckets=3,
+      output_min=0.0,
+      output_max=lattice_sizes[6] - 1.0,
+      # Categorical monotonicity can be a partial order.
+      monotonicities=[(0, 1), (0, 2)],
+      # Categorical calibration layer supports standard Keras regularizers.
+      kernel_regularizer=keras.regularizers.l1_l2(l1=0.001),
+      kernel_initializer='constant')
+  combined_calibrators.append(calibrator)
+
+  # Create a Lattice layer to nonlinearly fuse the outputs of the calibrators.
+  # Don't forget to specify 'increasing' monotonicity for every dimension
+  # whose calibrator is monotonic, regardless of the monotonicity direction of
+  # the calibrator. This includes partial monotonicity of the
+  # CategoricalCalibration layer.
+  lattice = tfl.layers.Lattice(
+      lattice_sizes=lattice_sizes,
+      monotonicities=['increasing', 'none', 'increasing', 'increasing',
+                      'increasing', 'increasing', 'increasing'],
+      output_min=0.0,
+      output_max=1.0)
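+
+  # Editorial note (a hedged description of ParallelCombination's role): the
+  # combined_calibrators layer takes the (batch_size, 7) feature matrix,
+  # routes one column to each appended calibrator, and concatenates the
+  # results, which is why the Sequential model below needs only two layers.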
+
+  model = keras.models.Sequential()
+  # We have just 2 layers as far as the Sequential model is concerned.
+  # The ParallelCombination layer takes care of grouping the calibrators.
+  model.add(combined_calibrators)
+  model.add(lattice)
+  model.compile(loss=keras.losses.mean_squared_error,
+                optimizer=keras.optimizers.Adagrad(learning_rate=1.0))
+
+  features = training_data_df[
+      ['age', 'sex', 'cp',
+       'trestbps', 'chol', 'fbs', 'restecg']].values.astype(np.float32)
+  target = training_data_df[['target']].values.astype(np.float32)
+
+  model.fit(features,
+            target,
+            batch_size=32,
+            epochs=FLAGS.num_epochs,
+            validation_split=0.2,
+            shuffle=False)
+
+
+if __name__ == '__main__':
+  app.run(main)
diff --git a/examples/lattice_test.py b/examples/lattice_test.py
deleted file mode 100644
index a441086..0000000
--- a/examples/lattice_test.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""A quick test script for TensorFlow Lattice's lattice layer."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import tensorflow as tf
-import tensorflow_lattice as tfl
-
-x = tf.compat.v1.placeholder(tf.float32, shape=(None, 2))
-(y, _, _, _) = tfl.lattice_layer(x, lattice_sizes=(2, 2))
-
-with tf.Session() as sess:
-  sess.run(tf.global_variables_initializer())
-  print(sess.run(y, feed_dict={x: [[0.0, 0.0]]}))
diff --git a/examples/uci_census.py b/examples/uci_census.py
deleted file mode 100644
index 55bbc5a..0000000
--- a/examples/uci_census.py
+++ /dev/null
@@ -1,557 +0,0 @@
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Train and evaluate models on UCI Census data.
-
-This is an example TensorFlow Lattice model training and evaluating program,
-using TensorFlow's `tf.estimators` library, a high level abstraction layer
-for machine learning models.
-
-TensorFlow Lattice also offers "layer" level components, so one can customize
-their own models, but these are not included in this example.
-
-Example run for calibrated linear model:
-
-* Uses bash variables `type` and `attempt` for convenience. You can bump
-  `attempt` when trying different hyper-parameters.
-* The flag `--create_quantiles` need to be set just the very first time you
-  run, since the data quantiles information used for calibration is the same
-  for all models.
-* Use `--hparams` to set changes to default parameters.
-* It will print out evaluation on the training data and evaluation data - every 1/10th of the training epochs. - -```bash -$ type=calibrated_linear ; attempt=1 ; - python uci_census.py --run=train --model_type=${type} - --output_dir=${HOME}/experiments/uci_census/${type}_${attempt} - --quantiles_dir=${HOME}/experiments/uci_census - --train_epochs=600 --batch_size=1000 - --hparams=learning_rate=1e-3 - --create_quantiles -``` - -Example run for calibrated RTL model (assumes you already created the -quantiles): - -* Notice calibrated RTL models train slower than calibrated linear model, but -should yield slightly better results. - -```bash -$ type=calibrated_rtl ; attempt=1 ; - python uci_census.py --run=train --model_type=${type} - --output_dir=${HOME}/experiments/uci_census/${type}_${attempt} - --quantiles_dir=${HOME}/experiments/uci_census - --train_epochs=600 --batch_size=1000 - --hparams=learning_rate=1e-2 -``` - -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import tempfile - -import pandas as pd -import six -import tensorflow as tf -import tensorflow_lattice as tfl - -flags = tf.flags -FLAGS = flags.FLAGS - -# Run mode of the program. -flags.DEFINE_string( - "run", "train", "One of 'train', 'evaluate' or 'save', train will " - "train on training data and also optionally evaluate; evaluate will " - "evaluate train and test data; save saves the trained model so far " - "so it can be used by TensorFlow Serving.") - -# Dataset. -flags.DEFINE_string("test", "/tmp/uci_census/adult.test", "Path to test file.") -flags.DEFINE_string("train", "/tmp/uci_census/adult.data", - "Path to train file.") - -# Model flags. -flags.DEFINE_string( - "output_dir", None, - "Directory where to store the model. If not set a temporary directory " - "will be automatically created.") -flags.DEFINE_string( - "model_type", "calibrated_linear", - "Types defined in this example: calibrated_linear, calibrated_lattice, " - " calibrated_rtl, calibrated_etl, calibrated_dnn") -flags.DEFINE_integer("batch_size", 1000, - "Number of examples to include in one batch. Increase " - "this number to improve parallelism, at cost of memory.") -flags.DEFINE_string("hparams", None, - "Model hyperparameters, see hyper-parameters in Tensorflow " - "Lattice documentation. Example: --hparams=learning_rate=" - "0.1,lattice_size=2,num_keypoints=100") - -# Calibration quantiles flags. -flags.DEFINE_bool("create_quantiles", False, - "Run once to create histogram of features for calibration. " - "It will use the --train dataset for that.") -flags.DEFINE_string( - "quantiles_dir", None, - "Directory where to store quantile information, defaults to the model " - "directory (set by --output-dir) but since quantiles can be reused by " - "models with different parameters, you may want to have a separate " - "directory.") - -# Training flags. -flags.DEFINE_integer("train_epochs", 10, - "How many epochs over data during training.") -flags.DEFINE_bool( - "train_evaluate_on_train", True, - "If set, every 1/10th of the train_epochs runs an evaluation on the " - "full train data.") -flags.DEFINE_bool( - "train_evaluate_on_test", True, - "If set, every 1/10th of the train_epochs runs an evaluation on the " - "full test data.") - -# Columns in dataset files. 
-CSV_COLUMNS = [ - "age", "workclass", "fnlwgt", "education", "education_num", - "marital_status", "occupation", "relationship", "race", "gender", - "capital_gain", "capital_loss", "hours_per_week", "native_country", - "income_bracket" -] - - -def get_test_input_fn(batch_size, num_epochs, shuffle): - return get_input_fn(FLAGS.test, batch_size, num_epochs, shuffle) - - -def get_train_input_fn(batch_size, num_epochs, shuffle): - return get_input_fn(FLAGS.train, batch_size, num_epochs, shuffle) - - -# Copy of data read from train/test files: keep copy to avoid re-reading -# it at every training/evaluation loop. -_df_data = {} -_df_data_labels = {} - - -def get_input_fn(file_path, batch_size, num_epochs, shuffle): - """Returns an input_fn closure for given parameters.""" - if file_path not in _df_data: - _df_data[file_path] = pd.read_csv( - tf.gfile.Open(file_path), - names=CSV_COLUMNS, - skipinitialspace=True, - engine="python", - skiprows=1) - _df_data[file_path] = _df_data[file_path].dropna(how="any", axis=0) - _df_data_labels[file_path] = _df_data[file_path]["income_bracket"].apply( - lambda x: ">50K" in x).astype(int) - return tf.compat.v1.estimator.inputs.pandas_input_fn( - x=_df_data[file_path], - y=_df_data_labels[file_path], - batch_size=batch_size, - shuffle=shuffle, - num_epochs=num_epochs, - num_threads=1) - - -def create_feature_columns(): - """Creates feature columns for UCI Census, some are sparse.""" - # Categorical features. - gender = tf.feature_column.categorical_column_with_vocabulary_list( - "gender", ["Female", "Male"]) - education = tf.feature_column.categorical_column_with_vocabulary_list( - "education", [ - "Bachelors", "HS-grad", "11th", "Masters", "9th", "Some-college", - "Assoc-acdm", "Assoc-voc", "7th-8th", "Doctorate", "Prof-school", - "5th-6th", "10th", "1st-4th", "Preschool", "12th" - ]) - marital_status = tf.feature_column.categorical_column_with_vocabulary_list( - "marital_status", [ - "Married-civ-spouse", "Divorced", "Married-spouse-absent", - "Never-married", "Separated", "Married-AF-spouse", "Widowed" - ]) - relationship = tf.feature_column.categorical_column_with_vocabulary_list( - "relationship", [ - "Husband", "Not-in-family", "Wife", "Own-child", "Unmarried", - "Other-relative" - ]) - workclass = tf.feature_column.categorical_column_with_vocabulary_list( - "workclass", [ - "Self-emp-not-inc", "Private", "State-gov", "Federal-gov", - "Local-gov", "?", "Self-emp-inc", "Without-pay", "Never-worked" - ]) - occupation = tf.feature_column.categorical_column_with_vocabulary_list( - "occupation", [ - "Prof-specialty", "Craft-repair", "Exec-managerial", "Adm-clerical", - "Sales", "Other-service", "Machine-op-inspct", "?", - "Transport-moving", "Handlers-cleaners", "Farming-fishing", - "Tech-support", "Protective-serv", "Priv-house-serv", "Armed-Forces" - ]) - race = tf.feature_column.categorical_column_with_vocabulary_list( - "race", [ - "White", - "Black", - "Asian-Pac-Islander", - "Amer-Indian-Eskimo", - "Other", - ]) - native_country = tf.feature_column.categorical_column_with_vocabulary_list( - "native_country", [ - "United-States", - "Mexico", - "?", - "Philippines", - "Germany", - "Canada", - "Puerto-Rico", - "El-Salvador", - "India", - "Cuba", - "England", - "Jamaica", - "South", - "China", - "Italy", - "Dominican-Republic", - "Vietnam", - "Guatemala", - "Japan", - "Poland", - "Columbia", - "Taiwan", - "Haiti", - "Iran", - "Portugal", - "Nicaragua", - "Peru", - "Greece", - "France", - "Ecuador", - "Ireland", - "Hong", - "Trinadad&Tobago", - "Cambodia", - 
"Thailand", - "Laos", - "Yugoslavia", - "Outlying-US(Guam-USVI-etc)", - "Hungary", - "Honduras", - "Scotland", - "Holand-Netherlands", - ]) - - # Numerical (continuous) base columns. - age = tf.feature_column.numeric_column("age") - education_num = tf.feature_column.numeric_column("education_num") - capital_gain = tf.feature_column.numeric_column("capital_gain") - capital_loss = tf.feature_column.numeric_column("capital_loss") - hours_per_week = tf.feature_column.numeric_column("hours_per_week") - - # fnlwgt: this should be the weight, how representative this example is of - # the population, we don't use it here. - # fnlwgt = tf.feature_column.numeric_column("fnlwgt") - - # income-bracket is the label, so, not returned here. - return [ - age, - workclass, - education, - education_num, - marital_status, - occupation, - relationship, - race, - gender, - capital_gain, - capital_loss, - hours_per_week, - native_country, - ] - - -def create_quantiles(quantiles_dir): - """Creates quantiles directory if it doesn't yet exist.""" - batch_size = 10000 - input_fn = get_train_input_fn( - batch_size=batch_size, num_epochs=1, shuffle=False) - # Reads until input is exhausted, 10000 at a time. - tfl.save_quantiles_for_keypoints( - input_fn=input_fn, - save_dir=quantiles_dir, - feature_columns=create_feature_columns(), - num_steps=None) - - -def _pprint_hparams(hparams): - """Pretty-print hparams.""" - print("* hparams=[") - for (key, value) in sorted(six.iteritems(hparams.values())): - print("\t{}={}".format(key, value)) - print("]") - - -def create_calibrated_linear(feature_columns, config, quantiles_dir): - feature_names = [fc.name for fc in feature_columns] - hparams = tfl.CalibratedLinearHParams( - feature_names=feature_names, num_keypoints=200, learning_rate=1e-4) - hparams.parse(FLAGS.hparams) - hparams.set_feature_param("capital_gain", "calibration_l2_laplacian_reg", - 4.0e-3) - _pprint_hparams(hparams) - return tfl.calibrated_linear_classifier( - feature_columns=feature_columns, - model_dir=config.model_dir, - config=config, - hparams=hparams, - quantiles_dir=quantiles_dir) - - -def create_calibrated_lattice(feature_columns, config, quantiles_dir): - """Creates a calibrated lattice estimator.""" - feature_names = [fc.name for fc in feature_columns] - hparams = tfl.CalibratedLatticeHParams( - feature_names=feature_names, - num_keypoints=200, - lattice_l2_laplacian_reg=5.0e-3, - lattice_l2_torsion_reg=1.0e-4, - learning_rate=0.1, - lattice_size=2) - hparams.parse(FLAGS.hparams) - _pprint_hparams(hparams) - return tfl.calibrated_lattice_classifier( - feature_columns=feature_columns, - model_dir=config.model_dir, - config=config, - hparams=hparams, - quantiles_dir=quantiles_dir) - - -def create_calibrated_rtl(feature_columns, config, quantiles_dir): - """Creates a calibrated RTL estimator.""" - feature_names = [fc.name for fc in feature_columns] - hparams = tfl.CalibratedRtlHParams( - feature_names=feature_names, - num_keypoints=200, - learning_rate=0.02, - lattice_l2_laplacian_reg=5.0e-4, - lattice_l2_torsion_reg=1.0e-4, - lattice_size=3, - lattice_rank=4, - num_lattices=100) - # Specific feature parameters. 
- hparams.set_feature_param("capital_gain", "lattice_size", 8) - hparams.set_feature_param("native_country", "lattice_size", 8) - hparams.set_feature_param("marital_status", "lattice_size", 4) - hparams.set_feature_param("age", "lattice_size", 8) - hparams.parse(FLAGS.hparams) - _pprint_hparams(hparams) - return tfl.calibrated_rtl_classifier( - feature_columns=feature_columns, - model_dir=config.model_dir, - config=config, - hparams=hparams, - quantiles_dir=quantiles_dir) - - -def create_calibrated_etl(feature_columns, config, quantiles_dir): - """Creates a calibrated ETL estimator.""" - # No enforced monotonicity in this example. - feature_names = [fc.name for fc in feature_columns] - hparams = tfl.CalibratedEtlHParams( - feature_names=feature_names, - num_keypoints=200, - learning_rate=0.02, - non_monotonic_num_lattices=200, - non_monotonic_lattice_rank=2, - non_monotonic_lattice_size=2, - calibration_l2_laplacian_reg=4.0e-3, - lattice_l2_laplacian_reg=1.0e-5, - lattice_l2_torsion_reg=4.0e-4) - hparams.parse(FLAGS.hparams) - _pprint_hparams(hparams) - return tfl.calibrated_etl_classifier( - feature_columns=feature_columns, - model_dir=config.model_dir, - config=config, - hparams=hparams, - quantiles_dir=quantiles_dir) - - -def create_calibrated_dnn(feature_columns, config, quantiles_dir): - """Creates a calibrated DNN model.""" - # This is an example of a hybrid model that uses input calibration layer - # offered by TensorFlow Lattice library and connects it to a DNN. - feature_names = [fc.name for fc in feature_columns] - hparams = tfl.CalibratedHParams( - feature_names=feature_names, - num_keypoints=200, - learning_rate=1.0e-3, - calibration_output_min=-1.0, - calibration_output_max=1.0, - nodes_per_layer=10, # All layers have the same number of nodes. - layers=2, # Includes output layer, therefore >= 1. - ) - hparams.parse(FLAGS.hparams) - _pprint_hparams(hparams) - - def _model_fn(features, labels, mode, params): - """Model construction closure used when creating estimator.""" - del params # Hyper-params are read directly from the bound variable hparams - - # Calibrate: since there is no monotonicity, there are no projection ops. - # We also discard the ordered names of the features. - (output, _, _, regularization) = tfl.input_calibration_layer_from_hparams( - features, feature_columns, hparams, quantiles_dir) - - # Hidden-layers. - for _ in range(hparams.layers - 1): - output = tf.layers.dense( - inputs=output, units=hparams.nodes_per_layer, activation=tf.sigmoid) - - # Classifier logits and prediction. - logits = tf.layers.dense(inputs=output, units=1) - predictions = tf.reshape(tf.sigmoid(logits), [-1]) - - # Notice loss doesn't include regularization, which is added separately - # by means of tf.contrib.layers.apply_regularization(). - loss_no_regularization = tf.losses.log_loss(labels, predictions) - loss = loss_no_regularization - if regularization is not None: - loss += regularization - optimizer = tf.train.AdamOptimizer(learning_rate=hparams.learning_rate) - train_op = optimizer.minimize( - loss, - global_step=tf.train.get_global_step(), - name="calibrated_dnn_minimize") - - eval_metric_ops = { - "accuracy": tf.metrics.accuracy(labels, predictions), - - # We want to report the loss without the regularization, so metric is - # comparable with different regularizations. FutureWork, list both. 
- "average_loss": tf.metrics.mean(loss_no_regularization), - } - - return tf.estimator.EstimatorSpec(mode, predictions, loss, train_op, - eval_metric_ops) - - # Hyper-parameters are passed directly to the model_fn closure by the context. - return tf.estimator.Estimator( - model_fn=_model_fn, - model_dir=config.model_dir, - config=config, - params=None) - - -def create_estimator(config, quantiles_dir): - """Creates estimator for given configuration based on --model_type.""" - feature_columns = create_feature_columns() - if FLAGS.model_type == "calibrated_linear": - return create_calibrated_linear(feature_columns, config, quantiles_dir) - elif FLAGS.model_type == "calibrated_lattice": - return create_calibrated_lattice(feature_columns, config, quantiles_dir) - elif FLAGS.model_type == "calibrated_rtl": - return create_calibrated_rtl(feature_columns, config, quantiles_dir) - elif FLAGS.model_type == "calibrated_etl": - return create_calibrated_etl(feature_columns, config, quantiles_dir) - elif FLAGS.model_type == "calibrated_dnn": - return create_calibrated_dnn(feature_columns, config, quantiles_dir) - - raise ValueError("Unknown model_type={}".format(FLAGS.model_type)) - - -def evaluate_on_data(estimator, data): - """Evaluates and prints results, set data to FLAGS.test or FLAGS.train.""" - name = os.path.basename(data) - evaluation = estimator.evaluate( - input_fn=get_input_fn( - file_path=data, - batch_size=FLAGS.batch_size, - num_epochs=1, - shuffle=False), - name=name) - print(" Evaluation on '{}':\taccuracy={:.4f}\taverage_loss={:.4f}".format( - name, evaluation["accuracy"], evaluation["average_loss"])) - - -def train(estimator): - """Trains estimator and optionally intermediary evaluations.""" - if not FLAGS.train_evaluate_on_train and not FLAGS.train_evaluate_on_test: - estimator.train(input_fn=get_train_input_fn( - batch_size=FLAGS.batch_size, - num_epochs=FLAGS.train_epochs, - shuffle=True)) - else: - # Train 1/10th of the epochs requested per loop, but at least 1 per loop. - epochs_trained = 0 - loops = 0 - while epochs_trained < FLAGS.train_epochs: - loops += 1 - next_epochs_trained = int(loops * FLAGS.train_epochs / 10.0) - epochs = max(1, next_epochs_trained - epochs_trained) - epochs_trained += epochs - estimator.train(input_fn=get_train_input_fn( - batch_size=FLAGS.batch_size, num_epochs=epochs, shuffle=True)) - print("Trained for {} epochs, total so far {}:".format( - epochs, epochs_trained)) - evaluate_on_data(estimator, FLAGS.train) - evaluate_on_data(estimator, FLAGS.test) - - -def evaluate(estimator): - """Runs straight evaluation on a currently trained model.""" - evaluate_on_data(estimator, FLAGS.train) - evaluate_on_data(estimator, FLAGS.test) - - -def main(args): - del args # Not used. - - # Prepare directories. - output_dir = FLAGS.output_dir - if output_dir is None: - output_dir = tempfile.mkdtemp() - tf.logging.warning("Using temporary folder as model directory: %s", - output_dir) - quantiles_dir = FLAGS.quantiles_dir or output_dir - - # Create quantiles if required. - if FLAGS.create_quantiles: - if FLAGS.run != "train": - raise ValueError( - "Can not create_quantiles for mode --run='{}'".format(FLAGS.run)) - create_quantiles(quantiles_dir) - - # Create config and then model. 
-  config = tf.estimator.RunConfig().replace(model_dir=output_dir)
-  estimator = create_estimator(config, quantiles_dir)
-
-  if FLAGS.run == "train":
-    train(estimator)
-
-  elif FLAGS.run == "evaluate":
-    evaluate(estimator)
-
-  else:
-    raise ValueError("Unknown --run={}".format(FLAGS.run))
-
-
-if __name__ == "__main__":
-  tf.app.run()
diff --git a/g3doc/api_docs/python/_toc.yaml b/g3doc/api_docs/python/_toc.yaml
deleted file mode 100644
index 2a3a8c1..0000000
--- a/g3doc/api_docs/python/_toc.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-# Automatically generated file; please do not edit
-toc:
-  - title: tensorflow_lattice
-    section:
-    - title: Overview
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice
-    - title: CalibratedEtlHParams
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/CalibratedEtlHParams
-    - title: CalibratedHParams
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/CalibratedHParams
-    - title: CalibratedLatticeHParams
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/CalibratedLatticeHParams
-    - title: CalibratedLinearHParams
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/CalibratedLinearHParams
-    - title: CalibratedRtlHParams
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/CalibratedRtlHParams
-    - title: calibrated_etl_classifier
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/calibrated_etl_classifier
-    - title: calibrated_etl_regressor
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/calibrated_etl_regressor
-    - title: calibrated_lattice_classifier
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/calibrated_lattice_classifier
-    - title: calibrated_lattice_regressor
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/calibrated_lattice_regressor
-    - title: calibrated_linear_classifier
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/calibrated_linear_classifier
-    - title: calibrated_linear_regressor
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/calibrated_linear_regressor
-    - title: calibrated_rtl_classifier
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/calibrated_rtl_classifier
-    - title: calibrated_rtl_regressor
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/calibrated_rtl_regressor
-    - title: calibration_layer
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/calibration_layer
-    - title: calibrator_regularization
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/calibrator_regularization
-    - title: ensemble_lattices_layer
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/ensemble_lattices_layer
-    - title: input_calibration_layer
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/input_calibration_layer
-    - title: input_calibration_layer_from_hparams
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/input_calibration_layer_from_hparams
-    - title: lattice
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/lattice
-    - title: lattice_layer
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/lattice_layer
-    - title: lattice_regularization
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/lattice_regularization
-    - title: load_keypoints_from_quantiles
-      path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/load_keypoints_from_quantiles
-    - title: monotone_lattice
-
path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/monotone_lattice - - title: monotonic_projection - path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/monotonic_projection - - title: PerFeatureHParams - path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/PerFeatureHParams - - title: pwl_indexing_calibrator - path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/pwl_indexing_calibrator - - title: save_quantiles_for_keypoints - path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/save_quantiles_for_keypoints - - title: uniform_keypoints_for_signal - path: /TARGET_DOC_ROOT/VERSION/api_docs/python/tensorflow_lattice/uniform_keypoints_for_signal diff --git a/g3doc/api_docs/python/index.md b/g3doc/api_docs/python/index.md deleted file mode 100644 index 33a4bdc..0000000 --- a/g3doc/api_docs/python/index.md +++ /dev/null @@ -1,31 +0,0 @@ -# All symbols in TensorFlow Lattice - -* [`tensorflow_lattice`](./tensorflow_lattice.md) -* [`tensorflow_lattice.CalibratedEtlHParams`](./tensorflow_lattice/CalibratedEtlHParams.md) -* [`tensorflow_lattice.CalibratedHParams`](./tensorflow_lattice/CalibratedHParams.md) -* [`tensorflow_lattice.CalibratedLatticeHParams`](./tensorflow_lattice/CalibratedLatticeHParams.md) -* [`tensorflow_lattice.CalibratedLinearHParams`](./tensorflow_lattice/CalibratedLinearHParams.md) -* [`tensorflow_lattice.CalibratedRtlHParams`](./tensorflow_lattice/CalibratedRtlHParams.md) -* [`tensorflow_lattice.PerFeatureHParams`](./tensorflow_lattice/PerFeatureHParams.md) -* [`tensorflow_lattice.calibrated_etl_classifier`](./tensorflow_lattice/calibrated_etl_classifier.md) -* [`tensorflow_lattice.calibrated_etl_regressor`](./tensorflow_lattice/calibrated_etl_regressor.md) -* [`tensorflow_lattice.calibrated_lattice_classifier`](./tensorflow_lattice/calibrated_lattice_classifier.md) -* [`tensorflow_lattice.calibrated_lattice_regressor`](./tensorflow_lattice/calibrated_lattice_regressor.md) -* [`tensorflow_lattice.calibrated_linear_classifier`](./tensorflow_lattice/calibrated_linear_classifier.md) -* [`tensorflow_lattice.calibrated_linear_regressor`](./tensorflow_lattice/calibrated_linear_regressor.md) -* [`tensorflow_lattice.calibrated_rtl_classifier`](./tensorflow_lattice/calibrated_rtl_classifier.md) -* [`tensorflow_lattice.calibrated_rtl_regressor`](./tensorflow_lattice/calibrated_rtl_regressor.md) -* [`tensorflow_lattice.calibration_layer`](./tensorflow_lattice/calibration_layer.md) -* [`tensorflow_lattice.calibrator_regularization`](./tensorflow_lattice/calibrator_regularization.md) -* [`tensorflow_lattice.ensemble_lattices_layer`](./tensorflow_lattice/ensemble_lattices_layer.md) -* [`tensorflow_lattice.input_calibration_layer`](./tensorflow_lattice/input_calibration_layer.md) -* [`tensorflow_lattice.input_calibration_layer_from_hparams`](./tensorflow_lattice/input_calibration_layer_from_hparams.md) -* [`tensorflow_lattice.lattice`](./tensorflow_lattice/lattice.md) -* [`tensorflow_lattice.lattice_layer`](./tensorflow_lattice/lattice_layer.md) -* [`tensorflow_lattice.lattice_regularization`](./tensorflow_lattice/lattice_regularization.md) -* [`tensorflow_lattice.load_keypoints_from_quantiles`](./tensorflow_lattice/load_keypoints_from_quantiles.md) -* [`tensorflow_lattice.monotone_lattice`](./tensorflow_lattice/monotone_lattice.md) -* [`tensorflow_lattice.monotonic_projection`](./tensorflow_lattice/monotonic_projection.md) -* [`tensorflow_lattice.pwl_indexing_calibrator`](./tensorflow_lattice/pwl_indexing_calibrator.md) -* 
[`tensorflow_lattice.save_quantiles_for_keypoints`](./tensorflow_lattice/save_quantiles_for_keypoints.md) -* [`tensorflow_lattice.uniform_keypoints_for_signal`](./tensorflow_lattice/uniform_keypoints_for_signal.md) \ No newline at end of file diff --git a/g3doc/api_docs/python/tensorflow_lattice.md b/g3doc/api_docs/python/tensorflow_lattice.md deleted file mode 100644 index 10220d3..0000000 --- a/g3doc/api_docs/python/tensorflow_lattice.md +++ /dev/null @@ -1,83 +0,0 @@ -
-
-# Module: tensorflow_lattice
-
-Lattice modeling.
-
-This package provides functions and classes for lattice modeling.
-
-See the full description in the `README.md` file.
-
-## Classes
-
-[`class CalibratedEtlHParams`](./tensorflow_lattice/CalibratedEtlHParams.md): Hyper-parameters for CalibratedEtl (Embedded tiny lattices) models.
-
-[`class CalibratedHParams`](./tensorflow_lattice/CalibratedHParams.md): PerFeatureHParams specialization with input calibration parameters.
-
-[`class CalibratedLatticeHParams`](./tensorflow_lattice/CalibratedLatticeHParams.md): Hyper-parameters for CalibratedLattice models.
-
-[`class CalibratedLinearHParams`](./tensorflow_lattice/CalibratedLinearHParams.md): Hyper-parameters for CalibratedLinear models.
-
-[`class CalibratedRtlHParams`](./tensorflow_lattice/CalibratedRtlHParams.md): Hyper-parameters for CalibratedRtl (RandomTinyLattices) models.
-
-[`class PerFeatureHParams`](./tensorflow_lattice/PerFeatureHParams.md): Parameters object with per-feature parametrization.
-
-## Functions
-
-[`calibrated_etl_classifier(...)`](./tensorflow_lattice/calibrated_etl_classifier.md): Calibrated etl binary classifier model.
-
-[`calibrated_etl_regressor(...)`](./tensorflow_lattice/calibrated_etl_regressor.md): Calibrated etl regressor model.
-
-[`calibrated_lattice_classifier(...)`](./tensorflow_lattice/calibrated_lattice_classifier.md): Calibrated lattice binary classifier model.
-
-[`calibrated_lattice_regressor(...)`](./tensorflow_lattice/calibrated_lattice_regressor.md): Calibrated lattice estimator (model) for regression.
-
-[`calibrated_linear_classifier(...)`](./tensorflow_lattice/calibrated_linear_classifier.md): Calibrated linear binary classifier model.
-
-[`calibrated_linear_regressor(...)`](./tensorflow_lattice/calibrated_linear_regressor.md): Calibrated linear estimator (model) for regression.
-
-[`calibrated_rtl_classifier(...)`](./tensorflow_lattice/calibrated_rtl_classifier.md): Calibrated rtl binary classifier model.
-
-[`calibrated_rtl_regressor(...)`](./tensorflow_lattice/calibrated_rtl_regressor.md): Calibrated rtl regressor model.
-
-[`calibration_layer(...)`](./tensorflow_lattice/calibration_layer.md): Creates a calibration layer for uncalibrated values.
-
-[`calibrator_regularization(...)`](./tensorflow_lattice/calibrator_regularization.md): Returns a calibrator regularization op.
-
-[`ensemble_lattices_layer(...)`](./tensorflow_lattice/ensemble_lattices_layer.md): Creates an ensemble of lattices layer.
-
-[`input_calibration_layer(...)`](./tensorflow_lattice/input_calibration_layer.md): Creates a calibration layer for the given input and feature_columns.
-
-[`input_calibration_layer_from_hparams(...)`](./tensorflow_lattice/input_calibration_layer_from_hparams.md): Creates a calibration layer for the input using hyper-parameters.
-
-[`lattice(...)`](./tensorflow_lattice/lattice.md): Returns an interpolated look-up table (lattice) op.
-
-[`lattice_layer(...)`](./tensorflow_lattice/lattice_layer.md): Creates a lattice layer.
-
-[`lattice_regularization(...)`](./tensorflow_lattice/lattice_regularization.md): Returns a lattice regularization op.
-
-[`load_keypoints_from_quantiles(...)`](./tensorflow_lattice/load_keypoints_from_quantiles.md): Retrieves keypoints initialization values for selected features.
-
-[`monotone_lattice(...)`](./tensorflow_lattice/monotone_lattice.md): Returns lattice parameters projected onto the monotonicity constraints.
-
-[`monotonic_projection(...)`](./tensorflow_lattice/monotonic_projection.md): Returns a non-strict monotonic projection of the vector.
-
-[`pwl_indexing_calibrator(...)`](./tensorflow_lattice/pwl_indexing_calibrator.md): Returns tensor representing interpolation weights in a piecewise linear function.
-
-[`save_quantiles_for_keypoints(...)`](./tensorflow_lattice/save_quantiles_for_keypoints.md): Calculates and saves quantiles for given features.
-
-[`uniform_keypoints_for_signal(...)`](./tensorflow_lattice/uniform_keypoints_for_signal.md): Returns a pair of initialization tensors for calibration keypoints.
-
-## Other Members
-
-`DEFAULT_NAME`
-
-`absolute_import`
-
diff --git a/g3doc/api_docs/python/tensorflow_lattice/CalibratedEtlHParams.md b/g3doc/api_docs/python/tensorflow_lattice/CalibratedEtlHParams.md
deleted file mode 100644
index 73d4cc0..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/CalibratedEtlHParams.md
+++ /dev/null
@@ -1,294 +0,0 @@
-
-
-# tensorflow_lattice.CalibratedEtlHParams
-
-## Class `CalibratedEtlHParams`
-
-Inherits From: [`CalibratedHParams`](../tensorflow_lattice/CalibratedHParams.md)
-
-Hyper-parameters for CalibratedEtl (Embedded tiny lattices) models.
-
-Supports regularization and monotonicity as described in `CalibratedHParams`.
-Values for `calibration_output_min`, `calibration_output_max` and
-`missing_output_value` get set automatically.
-
-Note that this architecture does not support per-feature lattice
-hyper-parameters such as missing_vertex, per-feature missing_input_value,
-per-feature lattice_size, or per-feature lattice regularization, because
-after the linear embedding all features are mixed together, so it is not
-clear how to merge per-feature parameters after the linear embedding layer.
-
-If there is no non-monotonic feature but `non_monotonic_lattice_rank` or
-`non_monotonic_num_lattices` is not `None`, an error will be raised.
-
-Added parameters:
-
-* `learning_rate`: (float) a global parameter that sets the step size of the
-  optimizer.
-* `lattice_size`: (int) a global parameter that controls the number of
-  cells for a feature. Should be greater than or equal to 2; the recommended
-  default value is 2. The calibrator output min and max should be
-  [0, `lattice_size` - 1], and the output should be bounded.
-* `interpolation_type`: a global parameter that defines whether the lattice
-  will interpolate using the full hypercube or only the simplex
-  ("hyper-triangle", much faster for larger lattices) around the point being
-  evaluated. Valid values: 'hypercube' or 'simplex'.
-* `monotonic_lattice_rank`: (int) the lattice rank of each monotonic lattice.
-* `monotonic_num_lattices`: (int) the number of monotonic lattices to be
-  created.
-* `monotonic_lattice_size`: (int) lattice cell size for each monotonic
-  lattice in the ensemble lattices layer.
-* `non_monotonic_lattice_rank`: (int) the lattice rank of each non-monotonic
-  lattice. If all features are monotonic, this parameter should be None.
-* `non_monotonic_num_lattices`: (int) the number of non-monotonic lattices to
-  be created. If all features are monotonic, this parameter should be None.
-* `non_monotonic_lattice_size`: (int) lattice cell size for each
-  non-monotonic lattice in the ensemble lattices layer.
-* `linear_embedding_calibration_min`: (float) a global parameter that
-  controls the minimum value of the intermediate calibration layers. Default
-  is -100.
-* `linear_embedding_calibration_max`: (float) a global parameter that
-  controls the maximum value of the intermediate calibration layers. Default
-  is 100.
-* `linear_embedding_calibration_num_keypoints`: (float) a global parameter
-  that controls `num_keypoints` in the intermediate calibration layers.
-  Default is 100.
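-
-For illustration, here is a minimal sketch of constructing these
-hyper-parameters, mirroring the `create_calibrated_etl` function in
-`examples/uci_census.py` above (the feature names are hypothetical; all
-features are non-monotonic, so only the non-monotonic ensemble is
-configured):
-
-```python
-import tensorflow_lattice as tfl
-
-# Hypothetical, all-non-monotonic feature set.
-hparams = tfl.CalibratedEtlHParams(
-    feature_names=["age", "capital_gain"],
-    num_keypoints=200,
-    learning_rate=0.02,
-    non_monotonic_num_lattices=200,
-    non_monotonic_lattice_rank=2,
-    non_monotonic_lattice_size=2)
-```
-
-## Methods
-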

__init__

- -``` python -__init__( - feature_names=None, - **kwargs -) -``` - - - -

__getattr__

- -``` python -__getattr__(param_name) -``` - - - -

add_feature

- -``` python -add_feature(feature_name) -``` - -Add feature_name (one name or list of names) to list of known names. - -

get_feature_names

- -``` python -get_feature_names() -``` - -Returns copy of list of known feature names. - -

get_feature_param

- -``` python -get_feature_param( - feature_name, - param_name, - default=None -) -``` - -Returns parameter for feature or falls back to global parameter. - -

get_global_and_feature_params

- -``` python -get_global_and_feature_params( - param_names, - feature_names -) -``` - -Returns values for multiple params, global and for each feature. - -#### Args: - -* `param_names`: list of parameters to get values for. -* `feature_names`: list of features to get specific values for. - - -#### Returns: - -* List of global values for parameters requested in `param_names`. -* List of list of per feature values for parameters requested in - `param_names` for features requested in `feature_names`. - -
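-
-For illustration, a minimal sketch (assuming an `hparams` object that already
-knows the hypothetical features "a" and "b"):
-
-```python
-# One call returns the global values of the requested parameters, plus the
-# per-feature values for each requested feature.
-global_vals, per_feature_vals = hparams.get_global_and_feature_params(
-    ["learning_rate", "lattice_size"], ["a", "b"])
-```
-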

get_param

- -``` python -get_param( - param_name, - default=None -) -``` - -Returns the global parameter or falls back to default. - -

is_feature_set_param

- -``` python -is_feature_set_param( - feature_name, - param_name -) -``` - -Returns whether param_name parameter is set for feature_name. - -

param_name_for_feature

- -``` python -param_name_for_feature( - feature_name, - param_name -) -``` - -Returns parameter name for specific feature parameter. - -

parse

-
-``` python
-parse(hparams_str)
-```
-
-Parses strings into hparams.
-
-#### Args:
-
-* `hparams_str`: must be a comma-separated list of "<name>=<value>",
-  where "<name>" is a hyper-parameter name, and "<value>" its value.
-
-
-#### Returns:
-
-Changes affect self, but returns self for convenience.
-
-
-#### Raises:
-
-* `ValueError`: if there is a problem with the input:
-    * if trying to set an unknown parameter.
-    * if trying to set unknown feature(s).
-    * if a value can't be converted to the parameter type.
-
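-
-For illustration, a minimal sketch (assuming "age" is a feature already known
-to `hparams`):
-
-```python
-# Global and per-feature values in one comma-separated string.
-hparams.parse("num_keypoints=100,learning_rate=0.05,"
-              "feature__age__num_keypoints=20")
-```
-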

parse_hparams

- -``` python -parse_hparams(hparams) -``` - -Incorporates hyper-parameters from another HParams object. - -Copies over values of hyper-parameters from the given object. New parameters -may be set, but not new features. Also works with -`tf.contrib.training.HParams` objects. - -#### Args: - -* `hparams`: `PerFeatureHParams` object, but also works with the standard - `tf.contrib.training.HParams` object. - - -#### Returns: - -Changes affect self, but returns self for convenience. - - -#### Raises: - -* `ValueError`: if trying to set unknown features, or if setting a feature - specific parameter for an unknown parameter. - -
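-
-For illustration, a minimal sketch (the values are hypothetical;
-`tf.contrib.training.HParams` is the TF 1.x class mentioned above):
-
-```python
-other = tf.contrib.training.HParams(learning_rate=0.01, num_keypoints=50)
-hparams.parse_hparams(other)  # Copies the values over onto `hparams`.
-```
-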

parse_param

- -``` python -parse_param( - param_name, - value_str -) -``` - -Parses parameter values from string. Returns self. - -

set_feature_param

- -``` python -set_feature_param( - feature_name, - param_name, - value -) -``` - -Sets parameter value specific for feature. Returns self. - -

set_param

- -``` python -set_param( - param_name, - value -) -``` - -Sets parameter value. Returns self. - -

set_param_type

-
-``` python
-set_param_type(
-    param_name,
-    param_type
-)
-```
-
-Sets the parameter type; it must already exist. Returns self.
-

values

- -``` python -values() -``` - -Returns shallow copy of the hyperparameter dict. - - - -## Class Members - -

FEATURE_PREFIX

- -

FEATURE_SEPARATOR

- diff --git a/g3doc/api_docs/python/tensorflow_lattice/CalibratedHParams.md b/g3doc/api_docs/python/tensorflow_lattice/CalibratedHParams.md deleted file mode 100644 index 9a294b6..0000000 --- a/g3doc/api_docs/python/tensorflow_lattice/CalibratedHParams.md +++ /dev/null @@ -1,282 +0,0 @@ -
-
-# tensorflow_lattice.CalibratedHParams
-
-## Class `CalibratedHParams`
-
-Inherits From: [`PerFeatureHParams`](../tensorflow_lattice/PerFeatureHParams.md)
-
-PerFeatureHParams specialization with input calibration parameters.
-
-The following hyper-parameters can be set as global, or per-feature (see
-base `PerFeatureHParams` for details):
-
-  * `feature_names`: list of feature names. Only feature names listed here
-    (or added later with add_feature) can have feature-specific parameter
-    values.
-  * `num_keypoints`: Number of keypoints to use for calibration. Set to 0 or
-    `None` for no calibration.
-  * `calibration_output_min`, `calibration_output_max`: initial and final
-    values for calibration. -1.0 to 1.0 works well for calibrated linear
-    models. For lattices one will want to set these to (0, `lattice_size`-1).
-    Only used during initialization of the calibration, if `quantiles_dir`
-    is given to the calibrated model (as opposed to defining one's own value
-    with `keypoints_initializers_fn`). It must be defined for calibration to
-    work; no default is set.
-  * `calibration_bound`: Whether the calibration output is bounded to the
-    limits given in `calibration_output_min`/`calibration_output_max`.
-  * `monotonicity`: Monotonicity for the feature. 0 for no monotonicity,
-    1 and -1 for increasing and decreasing monotonicity respectively.
-  * `missing_input_value`: If set, and the input has this value, the input is
-    assumed to be missing and the output will either be calibrated to some
-    value between `[calibration_output_min, calibration_output_max]` or set
-    to a fixed value given by missing_output_value.
-  * `missing_output_value`: Requires missing_input_value to also be set. If
-    set, it will convert missing inputs to this value. Leave it undefined and
-    the output will be learned.
-  * `calibration_l1_reg`, `calibration_l2_reg`,
-    `calibration_l1_laplacian_reg`, `calibration_l2_laplacian_reg`:
-    Regularization amounts for the calibrator regularizers. Default is
-    `None`.
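-
-For illustration, a minimal sketch (the feature names are hypothetical):
-global calibration settings with per-feature overrides, set both at
-construction time and afterwards.
-
-```python
-import tensorflow_lattice as tfl
-
-hparams = tfl.CalibratedHParams(
-    feature_names=["age", "weight"],
-    num_keypoints=100,
-    calibration_output_min=-1.0,
-    calibration_output_max=1.0,
-    feature__age__monotonicity=1)
-# Per-feature values can also be set after construction.
-hparams.set_feature_param("weight", "calibration_l2_reg", 1.0e-4)
-```
-
-## Methods
-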

__init__

- -``` python -__init__( - feature_names=None, - **kwargs -) -``` - - - -

__getattr__

- -``` python -__getattr__(param_name) -``` - - - -

add_feature

- -``` python -add_feature(feature_name) -``` - -Add feature_name (one name or list of names) to list of known names. - -

get_feature_names

- -``` python -get_feature_names() -``` - -Returns copy of list of known feature names. - -

get_feature_param

- -``` python -get_feature_param( - feature_name, - param_name, - default=None -) -``` - -Returns parameter for feature or falls back to global parameter. - -

get_global_and_feature_params

- -``` python -get_global_and_feature_params( - param_names, - feature_names -) -``` - -Returns values for multiple params, global and for each feature. - -#### Args: - -* `param_names`: list of parameters to get values for. -* `feature_names`: list of features to get specific values for. - - -#### Returns: - -* List of global values for parameters requested in `param_names`. -* List of list of per feature values for parameters requested in - `param_names` for features requested in `feature_names`. - -

get_param

- -``` python -get_param( - param_name, - default=None -) -``` - -Returns the global parameter or falls back to default. - -

is_feature_set_param

- -``` python -is_feature_set_param( - feature_name, - param_name -) -``` - -Returns whether param_name parameter is set for feature_name. - -

param_name_for_feature

- -``` python -param_name_for_feature( - feature_name, - param_name -) -``` - -Returns parameter name for specific feature parameter. - -

parse

-
-``` python
-parse(hparams_str)
-```
-
-Parses strings into hparams.
-
-#### Args:
-
-* `hparams_str`: must be a comma-separated list of "<name>=<value>",
-  where "<name>" is a hyper-parameter name, and "<value>" its value.
-
-
-#### Returns:
-
-Changes affect self, but returns self for convenience.
-
-
-#### Raises:
-
-* `ValueError`: if there is a problem with the input:
-    * if trying to set an unknown parameter.
-    * if trying to set unknown feature(s).
-    * if a value can't be converted to the parameter type.
-

parse_hparams

- -``` python -parse_hparams(hparams) -``` - -Incorporates hyper-parameters from another HParams object. - -Copies over values of hyper-parameters from the given object. New parameters -may be set, but not new features. Also works with -`tf.contrib.training.HParams` objects. - -#### Args: - -* `hparams`: `PerFeatureHParams` object, but also works with the standard - `tf.contrib.training.HParams` object. - - -#### Returns: - -Changes affect self, but returns self for convenience. - - -#### Raises: - -* `ValueError`: if trying to set unknown features, or if setting a feature - specific parameter for an unknown parameter. - -

parse_param

- -``` python -parse_param( - param_name, - value_str -) -``` - -Parses parameter values from string. Returns self. - -

set_feature_param

- -``` python -set_feature_param( - feature_name, - param_name, - value -) -``` - -Sets parameter value specific for feature. Returns self. - -

set_param

- -``` python -set_param( - param_name, - value -) -``` - -Sets parameter value. Returns self. - -

set_param_type

-
-``` python
-set_param_type(
-    param_name,
-    param_type
-)
-```
-
-Sets the parameter type; it must already exist. Returns self.
-

values

- -``` python -values() -``` - -Returns shallow copy of the hyperparameter dict. - - - -## Class Members - -

FEATURE_PREFIX

- -

FEATURE_SEPARATOR

- diff --git a/g3doc/api_docs/python/tensorflow_lattice/CalibratedLatticeHParams.md b/g3doc/api_docs/python/tensorflow_lattice/CalibratedLatticeHParams.md deleted file mode 100644 index 6803985..0000000 --- a/g3doc/api_docs/python/tensorflow_lattice/CalibratedLatticeHParams.md +++ /dev/null @@ -1,277 +0,0 @@ -
-
-# tensorflow_lattice.CalibratedLatticeHParams
-
-## Class `CalibratedLatticeHParams`
-
-Inherits From: [`CalibratedHParams`](../tensorflow_lattice/CalibratedHParams.md)
-
-Hyper-parameters for CalibratedLattice models.
-
-Supports regularization and monotonicity as described in `CalibratedHParams`.
-Values for `calibration_output_min`, `calibration_output_max` and
-`missing_output_value` get set automatically.
-
-Added parameters:
-
-* `learning_rate`: (float) a global parameter that sets the step size of the
-  optimizer.
-* `lattice_size`: (int) a global or per-feature parameter that controls the
-  number of cells for a feature. Should be greater than or equal to 2; the
-  recommended default value is 2. The calibrator output min and max should be
-  [0, lattice_size - 1], and the output should be bounded, since a lattice
-  expects an input in the range [0, lattice_size - 1].
-* `interpolation_type`: a global parameter that defines whether the lattice
-  will interpolate using the full hypercube or only the simplex
-  ("hyper-triangle", much faster for larger lattices) around the point being
-  evaluated. Valid values: 'hypercube' or 'simplex'.
-* `missing_input_value`: Value for which a feature is considered missing.
-  Such values are either automatically learned to some calibrated value, or,
-  if missing_vertex is set, they get their own value in the lattice.
-* `missing_vertex`: if missing_input_value is set, this boolean value
-  indicates whether to create an extra vertex for missing values.
-* `lattice_l1_reg`, `lattice_l2_reg`, `lattice_l1_torsion_reg`,
-  `lattice_l2_torsion_reg`, `lattice_l1_laplacian_reg`,
-  `lattice_l2_laplacian_reg`: Regularization amounts for the lattice
-  regularizers. Default is `None`.
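-
-For illustration, a minimal sketch (the feature name and the -1.0 sentinel
-are hypothetical, and it assumes the missing-value parameters can be set per
-feature via the base-class `set_feature_param` mechanism):
-
-```python
-import tensorflow_lattice as tfl
-
-hparams = tfl.CalibratedLatticeHParams(
-    feature_names=["age"],
-    num_keypoints=200,
-    learning_rate=0.1,
-    lattice_size=2)
-# Inputs equal to -1.0 are treated as missing; missing_vertex gives them
-# their own vertex in the lattice.
-hparams.set_feature_param("age", "missing_input_value", -1.0)
-hparams.set_feature_param("age", "missing_vertex", True)
-```
-
-## Methods
-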

__init__

- -``` python -__init__( - feature_names=None, - **kwargs -) -``` - - - -

__getattr__

- -``` python -__getattr__(param_name) -``` - - - -

add_feature

- -``` python -add_feature(feature_name) -``` - -Add feature_name (one name or list of names) to list of known names. - -

get_feature_names

- -``` python -get_feature_names() -``` - -Returns copy of list of known feature names. - -

get_feature_param

- -``` python -get_feature_param( - feature_name, - param_name, - default=None -) -``` - -Returns parameter for feature or falls back to global parameter. - -

get_global_and_feature_params

- -``` python -get_global_and_feature_params( - param_names, - feature_names -) -``` - -Returns values for multiple params, global and for each feature. - -#### Args: - -* `param_names`: list of parameters to get values for. -* `feature_names`: list of features to get specific values for. - - -#### Returns: - -* List of global values for parameters requested in `param_names`. -* List of list of per feature values for parameters requested in - `param_names` for features requested in `feature_names`. - -

get_param

- -``` python -get_param( - param_name, - default=None -) -``` - -Returns the global parameter or falls back to default. - -

is_feature_set_param

- -``` python -is_feature_set_param( - feature_name, - param_name -) -``` - -Returns whether param_name parameter is set for feature_name. - -

param_name_for_feature

- -``` python -param_name_for_feature( - feature_name, - param_name -) -``` - -Returns parameter name for specific feature parameter. - -

parse

-
-``` python
-parse(hparams_str)
-```
-
-Parses strings into hparams.
-
-#### Args:
-
-* `hparams_str`: must be a comma-separated list of "<name>=<value>",
-  where "<name>" is a hyper-parameter name, and "<value>" its value.
-
-
-#### Returns:
-
-Changes affect self, but returns self for convenience.
-
-
-#### Raises:
-
-* `ValueError`: if there is a problem with the input:
-    * if trying to set an unknown parameter.
-    * if trying to set unknown feature(s).
-    * if a value can't be converted to the parameter type.
-

parse_hparams

- -``` python -parse_hparams(hparams) -``` - -Incorporates hyper-parameters from another HParams object. - -Copies over values of hyper-parameters from the given object. New parameters -may be set, but not new features. Also works with -`tf.contrib.training.HParams` objects. - -#### Args: - -* `hparams`: `PerFeatureHParams` object, but also works with the standard - `tf.contrib.training.HParams` object. - - -#### Returns: - -Changes affect self, but returns self for convenience. - - -#### Raises: - -* `ValueError`: if trying to set unknown features, or if setting a feature - specific parameter for an unknown parameter. - -

parse_param

- -``` python -parse_param( - param_name, - value_str -) -``` - -Parses parameter values from string. Returns self. - -

set_feature_param

- -``` python -set_feature_param( - feature_name, - param_name, - value -) -``` - -Sets parameter value specific for feature. Returns self. - -

set_param

- -``` python -set_param( - param_name, - value -) -``` - -Sets parameter value. Returns self. - -

set_param_type

-
-``` python
-set_param_type(
-    param_name,
-    param_type
-)
-```
-
-Sets the parameter type; it must already exist. Returns self.
-

values

- -``` python -values() -``` - -Returns shallow copy of the hyperparameter dict. - - - -## Class Members - -

FEATURE_PREFIX

- -

FEATURE_SEPARATOR

- diff --git a/g3doc/api_docs/python/tensorflow_lattice/CalibratedLinearHParams.md b/g3doc/api_docs/python/tensorflow_lattice/CalibratedLinearHParams.md deleted file mode 100644 index 3e8d106..0000000 --- a/g3doc/api_docs/python/tensorflow_lattice/CalibratedLinearHParams.md +++ /dev/null @@ -1,260 +0,0 @@ -
-
-# tensorflow_lattice.CalibratedLinearHParams
-
-## Class `CalibratedLinearHParams`
-
-Inherits From: [`CalibratedHParams`](../tensorflow_lattice/CalibratedHParams.md)
-
-Hyper-parameters for CalibratedLinear models.
-
-Same as `CalibratedHParams` (hyper-parameters for input calibration) plus
-the global learning_rate.
-
-The parameters `calibration_output_min` and `calibration_output_max`
-shouldn't be changed (they are fixed at -1.0 and +1.0), since they are
-eventually re-scaled by the linear layer on top.
-
-It supports regularization, monotonicity, and missing values (input and
-optionally output).
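-
-For illustration, a minimal sketch mirroring the `create_calibrated_linear`
-function in `examples/uci_census.py` above (the feature names are
-hypothetical):
-
-```python
-import tensorflow_lattice as tfl
-
-feature_names = ["age", "capital_gain"]  # Hypothetical features.
-hparams = tfl.CalibratedLinearHParams(
-    feature_names=feature_names, num_keypoints=200, learning_rate=1e-4)
-# Regularize a single feature's calibrator.
-hparams.set_feature_param("capital_gain", "calibration_l2_laplacian_reg",
-                          4.0e-3)
-```
-
-## Methods
-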

__init__

- -``` python -__init__( - feature_names=None, - **kwargs -) -``` - - - -

__getattr__

- -``` python -__getattr__(param_name) -``` - - - -

add_feature

- -``` python -add_feature(feature_name) -``` - -Add feature_name (one name or list of names) to list of known names. - -

get_feature_names

- -``` python -get_feature_names() -``` - -Returns copy of list of known feature names. - -

get_feature_param

- -``` python -get_feature_param( - feature_name, - param_name, - default=None -) -``` - -Returns parameter for feature or falls back to global parameter. - -

get_global_and_feature_params

- -``` python -get_global_and_feature_params( - param_names, - feature_names -) -``` - -Returns values for multiple params, global and for each feature. - -#### Args: - -* `param_names`: list of parameters to get values for. -* `feature_names`: list of features to get specific values for. - - -#### Returns: - -* List of global values for parameters requested in `param_names`. -* List of list of per feature values for parameters requested in - `param_names` for features requested in `feature_names`. - -

get_param

- -``` python -get_param( - param_name, - default=None -) -``` - -Returns the global parameter or falls back to default. - -

is_feature_set_param

- -``` python -is_feature_set_param( - feature_name, - param_name -) -``` - -Returns whether param_name parameter is set for feature_name. - -

param_name_for_feature

- -``` python -param_name_for_feature( - feature_name, - param_name -) -``` - -Returns parameter name for specific feature parameter. - -

parse

-
-``` python
-parse(hparams_str)
-```
-
-Parses strings into hparams.
-
-#### Args:
-
-* `hparams_str`: must be a comma-separated list of "<name>=<value>",
-  where "<name>" is a hyper-parameter name, and "<value>" its value.
-
-
-#### Returns:
-
-Changes affect self, but returns self for convenience.
-
-
-#### Raises:
-
-* `ValueError`: if there is a problem with the input:
-    * if trying to set an unknown parameter.
-    * if trying to set unknown feature(s).
-    * if a value can't be converted to the parameter type.
-

parse_hparams

- -``` python -parse_hparams(hparams) -``` - -Incorporates hyper-parameters from another HParams object. - -Copies over values of hyper-parameters from the given object. New parameters -may be set, but not new features. Also works with -`tf.contrib.training.HParams` objects. - -#### Args: - -* `hparams`: `PerFeatureHParams` object, but also works with the standard - `tf.contrib.training.HParams` object. - - -#### Returns: - -Changes affect self, but returns self for convenience. - - -#### Raises: - -* `ValueError`: if trying to set unknown features, or if setting a feature - specific parameter for an unknown parameter. - -

parse_param

- -``` python -parse_param( - param_name, - value_str -) -``` - -Parses parameter values from string. Returns self. - -

set_feature_param

- -``` python -set_feature_param( - feature_name, - param_name, - value -) -``` - -Sets parameter value specific for feature. Returns self. - -

set_param

- -``` python -set_param( - param_name, - value -) -``` - -Sets parameter value. Returns self. - -

set_param_type

-
-``` python
-set_param_type(
-    param_name,
-    param_type
-)
-```
-
-Sets the parameter type; it must already exist. Returns self.
-

values

- -``` python -values() -``` - -Returns shallow copy of the hyperparameter dict. - - - -## Class Members - -

FEATURE_PREFIX

- -

FEATURE_SEPARATOR

- diff --git a/g3doc/api_docs/python/tensorflow_lattice/CalibratedRtlHParams.md b/g3doc/api_docs/python/tensorflow_lattice/CalibratedRtlHParams.md deleted file mode 100644 index 98f9cad..0000000 --- a/g3doc/api_docs/python/tensorflow_lattice/CalibratedRtlHParams.md +++ /dev/null @@ -1,283 +0,0 @@ -
-
-# tensorflow_lattice.CalibratedRtlHParams
-
-## Class `CalibratedRtlHParams`
-
-Inherits From: [`CalibratedHParams`](../tensorflow_lattice/CalibratedHParams.md)
-
-Hyper-parameters for CalibratedRtl (RandomTinyLattices) models.
-
-Supports regularization and monotonicity as described in `CalibratedHParams`.
-Values for `calibration_output_min`, `calibration_output_max` and
-`missing_output_value` get set automatically.
-
-Added parameters:
-
-* `learning_rate`: (float) a global parameter that sets the step size of the
-  optimizer.
-* `lattice_size`: (int) a global or per-feature parameter that controls the
-  number of cells for a feature. Should be greater than or equal to 2; the
-  recommended default value is 2. The calibrator output min and max should be
-  [0, lattice_size - 1], and the output should be bounded, since a lattice
-  expects an input in the range [0, lattice_size - 1]. (Note that if
-  missing_vertex is True, an extra vertex is added, so the input range is
-  [0, lattice_size].)
-* `num_lattices`: (int) the number of lattices to be created.
-* `lattice_rank`: (int) the lattice rank of each lattice.
-* `interpolation_type`: a global parameter that defines whether the lattice
-  will interpolate using the full hypercube or only the simplex
-  ("hyper-triangle", much faster for larger lattices) around the point being
-  evaluated. Valid values: 'hypercube' or 'simplex'.
-* `ensemble_bias`: (float) the initial value of the bias term to be added to
-  the output of the ensemble.
-* `rtl_seed`: (int) a random seed for the rtl construction.
-* `missing_input_value`: Value for which a feature is considered missing.
-  Such values are either automatically learned to some calibrated value, or,
-  if missing_vertex is set, they get their own value in the lattice.
-* `missing_vertex`: if missing_input_value is set, this boolean value
-  indicates whether to create an extra vertex for missing values.
-* `lattice_l1_reg`, `lattice_l2_reg`, `lattice_l1_torsion_reg`,
-  `lattice_l2_torsion_reg`, `lattice_l1_laplacian_reg`,
-  `lattice_l2_laplacian_reg`: Regularization amounts for the lattice
-  regularizers. Default is `None`.
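-
-For illustration, a minimal sketch following the `create_calibrated_rtl`
-function in `examples/uci_census.py` above (the feature names and the
-`rtl_seed` value are hypothetical):
-
-```python
-import tensorflow_lattice as tfl
-
-feature_names = ["age", "capital_gain"]  # Hypothetical features.
-hparams = tfl.CalibratedRtlHParams(
-    feature_names=feature_names,
-    num_keypoints=200,
-    learning_rate=0.02,
-    lattice_size=3,
-    lattice_rank=4,
-    num_lattices=100,
-    rtl_seed=42)
-# Selected features can get a finer lattice.
-hparams.set_feature_param("capital_gain", "lattice_size", 8)
-```
-
-## Methods
-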

__init__

- -``` python -__init__( - feature_names=None, - **kwargs -) -``` - - - -

__getattr__

- -``` python -__getattr__(param_name) -``` - - - -

add_feature

- -``` python -add_feature(feature_name) -``` - -Add feature_name (one name or list of names) to list of known names. - -

get_feature_names

- -``` python -get_feature_names() -``` - -Returns copy of list of known feature names. - -

get_feature_param

- -``` python -get_feature_param( - feature_name, - param_name, - default=None -) -``` - -Returns parameter for feature or falls back to global parameter. - -

get_global_and_feature_params

- -``` python -get_global_and_feature_params( - param_names, - feature_names -) -``` - -Returns values for multiple params, global and for each feature. - -#### Args: - -* `param_names`: list of parameters to get values for. -* `feature_names`: list of features to get specific values for. - - -#### Returns: - -* List of global values for parameters requested in `param_names`. -* List of list of per feature values for parameters requested in - `param_names` for features requested in `feature_names`. - -

get_param

- -``` python -get_param( - param_name, - default=None -) -``` - -Returns the global parameter or falls back to default. - -

is_feature_set_param

- -``` python -is_feature_set_param( - feature_name, - param_name -) -``` - -Returns whether param_name parameter is set for feature_name. - -

param_name_for_feature

- -``` python -param_name_for_feature( - feature_name, - param_name -) -``` - -Returns parameter name for specific feature parameter. - -

parse

-
-``` python
-parse(hparams_str)
-```
-
-Parses strings into hparams.
-
-#### Args:
-
-* `hparams_str`: must be a comma-separated list of "<name>=<value>",
-  where "<name>" is a hyper-parameter name, and "<value>" its value.
-
-
-#### Returns:
-
-Changes affect self, but returns self for convenience.
-
-
-#### Raises:
-
-* `ValueError`: if there is a problem with the input:
-    * if trying to set an unknown parameter.
-    * if trying to set unknown feature(s).
-    * if a value can't be converted to the parameter type.
-

parse_hparams

- -``` python -parse_hparams(hparams) -``` - -Incorporates hyper-parameters from another HParams object. - -Copies over values of hyper-parameters from the given object. New parameters -may be set, but not new features. Also works with -`tf.contrib.training.HParams` objects. - -#### Args: - -* `hparams`: `PerFeatureHParams` object, but also works with the standard - `tf.contrib.training.HParams` object. - - -#### Returns: - -Changes affect self, but returns self for convenience. - - -#### Raises: - -* `ValueError`: if trying to set unknown features, or if setting a feature - specific parameter for an unknown parameter. - -

parse_param

- -``` python -parse_param( - param_name, - value_str -) -``` - -Parses parameter values from string. Returns self. - -

set_feature_param

- -``` python -set_feature_param( - feature_name, - param_name, - value -) -``` - -Sets parameter value specific for feature. Returns self. - -

set_param

- -``` python -set_param( - param_name, - value -) -``` - -Sets parameter value. Returns self. - -

set_param_type

-
-``` python
-set_param_type(
-    param_name,
-    param_type
-)
-```
-
-Sets the parameter type; it must already exist. Returns self.
-

values

- -``` python -values() -``` - -Returns shallow copy of the hyperparameter dict. - - - -## Class Members - -

FEATURE_PREFIX

- -

FEATURE_SEPARATOR

- diff --git a/g3doc/api_docs/python/tensorflow_lattice/PerFeatureHParams.md b/g3doc/api_docs/python/tensorflow_lattice/PerFeatureHParams.md deleted file mode 100644 index 3530c2a..0000000 --- a/g3doc/api_docs/python/tensorflow_lattice/PerFeatureHParams.md +++ /dev/null @@ -1,297 +0,0 @@ -
-
-# tensorflow_lattice.PerFeatureHParams
-
-## Class `PerFeatureHParams`
-
-Parameters object with per-feature parametrization.
-
-Each parameter can be overwritten for specific features by setting
-`feature__<feature_name>__<parameter_name>`, otherwise it falls back to the
-global parameter value `<parameter_name>`.
-
-Parameter types are set from their first value set -- but they can also be
-reset by `set_param_type`.
-
-Example: let's say we have a parameter `lattice_size` that should be 2 if not
-specified (global value), but can be overridden per feature; let's assume
-there are 3 features: `a`, `b`, and `c` (added after construction). Then:
-
-```python
-  hparams = PerFeatureHParams(["a", "b"], lattice_size=2,
-                              feature__b__lattice_size=3)
-  hparams.add_feature(["c"])
-  hparams.get_param("lattice_size") == 2
-  hparams.get_feature_param("a", "lattice_size") == 2
-  hparams.get_feature_param("b", "lattice_size") == 3
-  hparams.get_feature_param("c", "lattice_size") == 2
-  hparams.get_feature_param("d", "lattice_size") raises a ValueError
-```
-
-Use the `get_feature_param` method to automatically get the specialized
-value, or fall back to the global one.
-
-## Methods
-

__init__

-
-``` python
-__init__(
-    feature_names=None,
-    **kwargs
-)
-```
-
-Constructs with an arbitrary list of parameters.
-
-#### Args:
-
-* `feature_names`: list of feature names. Only feature names listed here
-  (or added later with add_feature) can have feature-specific parameter
-  values.
-* `**kwargs`: parameter names and values.
-
-
-#### Returns:
-
-PerFeatureHParams object.
-
-
-#### Raises:
-
-* `ValueError`: if a feature-specific parameter value is set for an
-  unknown feature.
-

__getattr__

- -``` python -__getattr__(param_name) -``` - - - -

add_feature

- -``` python -add_feature(feature_name) -``` - -Add feature_name (one name or list of names) to list of known names. - -

get_feature_names

- -``` python -get_feature_names() -``` - -Returns copy of list of known feature names. - -

get_feature_param

- -``` python -get_feature_param( - feature_name, - param_name, - default=None -) -``` - -Returns parameter for feature or falls back to global parameter. - -

get_global_and_feature_params

- -``` python -get_global_and_feature_params( - param_names, - feature_names -) -``` - -Returns values for multiple params, global and for each feature. - -#### Args: - -* `param_names`: list of parameters to get values for. -* `feature_names`: list of features to get specific values for. - - -#### Returns: - -* List of global values for parameters requested in `param_names`. -* List of list of per feature values for parameters requested in - `param_names` for features requested in `feature_names`. - -

get_param

- -``` python -get_param( - param_name, - default=None -) -``` - -Returns the global parameter or falls back to default. - -

is_feature_set_param

- -``` python -is_feature_set_param( - feature_name, - param_name -) -``` - -Returns whether param_name parameter is set for feature_name. - -

param_name_for_feature

- -``` python -param_name_for_feature( - feature_name, - param_name -) -``` - -Returns parameter name for specific feature parameter. - -

parse

-
-``` python
-parse(hparams_str)
-```
-
-Parses strings into hparams.
-
-#### Args:
-
-* `hparams_str`: must be a comma-separated list of "<name>=<value>",
-  where "<name>" is a hyper-parameter name, and "<value>" its value.
-
-
-#### Returns:
-
-Changes affect self, but returns self for convenience.
-
-
-#### Raises:
-
-* `ValueError`: if there is a problem with the input:
-    * if trying to set an unknown parameter.
-    * if trying to set unknown feature(s).
-    * if a value can't be converted to the parameter type.
-

parse_hparams

- -``` python -parse_hparams(hparams) -``` - -Incorporates hyper-parameters from another HParams object. - -Copies over values of hyper-parameters from the given object. New parameters -may be set, but not new features. Also works with -`tf.contrib.training.HParams` objects. - -#### Args: - -* `hparams`: `PerFeatureHParams` object, but also works with the standard - `tf.contrib.training.HParams` object. - - -#### Returns: - -Changes affect self, but returns self for convenience. - - -#### Raises: - -* `ValueError`: if trying to set unknown features, or if setting a feature - specific parameter for an unknown parameter. - -

parse_param

- -``` python -parse_param( - param_name, - value_str -) -``` - -Parses parameter values from string. Returns self. - -

set_feature_param

- -``` python -set_feature_param( - feature_name, - param_name, - value -) -``` - -Sets parameter value specific for feature. Returns self. - -

set_param

- -``` python -set_param( - param_name, - value -) -``` - -Sets parameter value. Returns self. - -

set_param_type

-
-``` python
-set_param_type(
-    param_name,
-    param_type
-)
-```
-
-Sets the parameter type; it must already exist. Returns self.
-

values

- -``` python -values() -``` - -Returns shallow copy of the hyperparameter dict. - - - -## Class Members - -

FEATURE_PREFIX

- -

FEATURE_SEPARATOR

- diff --git a/g3doc/api_docs/python/tensorflow_lattice/calibrated_etl_classifier.md b/g3doc/api_docs/python/tensorflow_lattice/calibrated_etl_classifier.md deleted file mode 100644 index 26454f8..0000000 --- a/g3doc/api_docs/python/tensorflow_lattice/calibrated_etl_classifier.md +++ /dev/null @@ -1,114 +0,0 @@ -
-
-# tensorflow_lattice.calibrated_etl_classifier
-
-``` python
-calibrated_etl_classifier(
-    feature_columns=None,
-    model_dir=None,
-    quantiles_dir=None,
-    keypoints_initializers_fn=None,
-    optimizer=None,
-    config=None,
-    hparams=None
-)
-```
-
-Calibrated etl binary classifier model.
-
-This model uses a piecewise lattice calibration function on each of the
-inputs (parametrized) and then feeds them to an ensemble of random lattices.
-num_lattices and lattice_rank (number of inputs to each lattice) must be
-specified in the hyper-parameters. Optionally, the calibration can be made
-monotonic.
-
-It usually requires a preprocessing step on the data to calculate the
-quantiles of each used feature. This can be done locally, or in one worker
-only, before training, in a separate invocation of your program (or
-directly). Typically this can be saved (`save_dir` parameter) to the same
-directory where the data is.
-
-Hyper-parameters are given in the form of the object
-tfl_hparams.CalibratedEtlHParams. lattice_rank and num_lattices must
-be specified; there are no default values for these. It also takes in
-per-feature parameters.
-
-Internally values will be converted to tf.float32.
-
-Example:
-
-```python
-def input_fn_train(): ...
-def input_fn_eval(): ...
-
-my_feature_columns=[...]
-
-# Have a separate program flag to generate the quantiles. Needs to be run
-# only once.
-if FLAGS.create_quantiles:
-  pwl_calibrators_layers.calculate_quantiles_for_keypoints(
-      input_fn=input_fn_train,
-      feature_columns=my_feature_columns,
-      save_dir=FLAGS.data_dir,
-      num_quantiles=1000,
-      override=True)
-  return  # Exit program.
-
-hparams = hparams.CalibratedEtlHParams(num_lattices=10, lattice_rank=2)
-estimator = calibrated_etl.calibrated_etl_classifier(
-    feature_columns=feature_columns, hparams=hparams)
-estimator.train(input_fn=input_fn_train)
-estimator.evaluate(input_fn=input_fn_eval)
-estimator.predict(input_fn=input_fn_predict)
-```
-
-#### Args:
-
-* `feature_columns`: Optional, an iterable containing all the feature
-  columns used by the model. All items in the set should be instances of
-  classes derived from `FeatureColumn`. If not given, the model will
-  use as features the tensors returned by input_fn.
-  Supported types of columns: RealValuedColumn.
-* `model_dir`: Directory to save model parameters, graphs, etc. This can
-  also be used to load checkpoints from the directory into an estimator to
-  continue training a previously saved model.
-* `quantiles_dir`: location where quantiles for the data were saved.
-  Typically the same directory as the training data. These quantiles can be
-  generated only once with
-  `pwl_calibration_layers.calculate_quantiles_for_keypoints` in a separate
-  invocation of your program. If you don't want to use quantiles, you can
-  set `keypoints_initializer` instead.
-* `keypoints_initializers_fn`: if you know the distribution of your
-  input features you can provide that directly instead of `quantiles_dir`.
-  See `pwl_calibrators_layers.uniform_keypoints_for_signal`. It must be
-  a closure that returns a pair of tensors with keypoint inputs and
-  outputs to use for initialization (must match `num_keypoints` configured
-  in `hparams`). Alternatively the closure can return a dict mapping
-  feature name to pairs for initialization per feature. If `quantiles_dir`
-  and `keypoints_initializers_fn` are both set, the latter takes precedence,
-  and the features for which `keypoints_initializers` are not defined
-  fall back to using the quantiles found in `quantiles_dir`. It uses a
-  closure instead of the tensors themselves because the graph has to be
-  created at the time the model is being built, which happens at a later
-  time.
-* `optimizer`: string, `Optimizer` object, or callable that defines the
-  optimizer to use for training -- if a callable, it will be called with
-  learning_rate=hparams.learning_rate.
-* `config`: RunConfig object to configure the runtime settings. Typically
-  set to learn_runner.EstimatorConfig().
-* `hparams`: an instance of tfl_hparams.CalibratedEtlHParams. If set to
-  None, default parameters are used.
-
-
-#### Returns:
-
-A `calibrated_etl_classifier` estimator.
-
-
-#### Raises:
-
-* `ValueError`: invalid parameters.
-* `KeyError`: type of feature not supported.
\ No newline at end of file
diff --git a/g3doc/api_docs/python/tensorflow_lattice/calibrated_etl_regressor.md b/g3doc/api_docs/python/tensorflow_lattice/calibrated_etl_regressor.md
deleted file mode 100644
index 2c200ea..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/calibrated_etl_regressor.md
+++ /dev/null
@@ -1,112 +0,0 @@
- -
-
-# tensorflow_lattice.calibrated_etl_regressor
-
-``` python
-calibrated_etl_regressor(
-    feature_columns=None,
-    model_dir=None,
-    quantiles_dir=None,
-    keypoints_initializers_fn=None,
-    optimizer=None,
-    config=None,
-    hparams=None
-)
-```
-
-Calibrated ETL regressor model.
-
-This model uses a piecewise linear calibration function on each of the
-inputs (parametrized) and then feeds them to an ensemble of random lattices.
-num_lattices and lattice_rank (the number of inputs to each lattice) must be
-specified in the hyperparameters. Optionally the calibration can be made
-monotonic.
-
-It usually requires a preprocessing step on the data to calculate the
-quantiles of each used feature. This can be done locally, or in one worker
-only, before training, in a separate invocation of your program (or directly).
-Typically the result is saved (`save_dir` parameter) to the same
-directory where the data is.
-
-Hyper-parameters are given in the form of the object
-tfl_hparams.CalibratedEtlHParams. lattice_rank and num_lattices must
-be specified; they have no default values. It also takes in
-per-feature parameters.
-
-Internally values will be converted to tf.float32.
-
-Example:
-
-```python
-def input_fn_train(): ...
-def input_fn_eval(): ...
-def input_fn_predict(): ...
-
-my_feature_columns=[...]
-
-# Have a separate program flag to generate the quantiles. Needs to be run
-# only once.
-if FLAGS.create_quantiles:
-  save_quantiles_for_keypoints(
-      input_fn=input_fn_train,
-      feature_columns=my_feature_columns,
-      save_dir=FLAGS.data_dir,
-      num_quantiles=1000,
-      override=True)
-  return  # Exit program.
-
-hparams = tfl_hparams.CalibratedEtlHParams(num_lattices=10, lattice_rank=2)
-estimator = calibrated_etl.calibrated_etl_regressor(
-    feature_columns=feature_columns, hparams=hparams)
-estimator.train(input_fn=input_fn_train)
-estimator.evaluate(input_fn=input_fn_eval)
-estimator.predict(input_fn=input_fn_predict)
-```
-
-#### Args:
-
-* `feature_columns`: Optional, an iterable containing all the feature
-  columns used by the model. All items in the set should be instances of
-  classes derived from `FeatureColumn`. If not given, the model will
-  use as features the tensors returned by input_fn.
-  Supported types of columns: RealValuedColumn.
-* `model_dir`: Directory to save model parameters, graphs, etc. This can
-  also be used to load checkpoints from the directory into an estimator to
-  continue training a previously saved model.
-* `quantiles_dir`: location where the quantiles for the data were saved.
-  Typically the same directory as the training data. These quantiles need
-  to be generated only once, with `save_quantiles_for_keypoints`, in a
-  separate invocation of your program. If you don't want to use quantiles,
-  you can set `keypoints_initializers_fn` instead.
-* `keypoints_initializers_fn`: if you know the distribution of your
-  input features you can provide that directly instead of `quantiles_dir`.
-  See `uniform_keypoints_for_signal`. It must be
-  a closure that returns a pair of tensors with keypoint inputs and
-  outputs to use for initialization (must match `num_keypoints` configured
-  in `hparams`). Alternatively the closure can return a dict mapping
-  feature name to pairs for initialization per feature. If `quantiles_dir`
-  and `keypoints_initializers_fn` are both set, the latter takes precedence,
-  and the features for which `keypoints_initializers` are not defined
-  fall back to using the quantiles found in `quantiles_dir`. It uses a
-  closure instead of the tensors themselves because the graph has to be
-  created at the time the model is built, which happens at a later time.
-* `optimizer`: string, `Optimizer` object, or callable that defines the
-  optimizer to use for training -- if a callable, it will be called with
-  learning_rate=hparams.learning_rate.
-* `config`: RunConfig object to configure the runtime settings. Typically set
-  to learn_runner.EstimatorConfig().
-* `hparams`: an instance of tfl_hparams.CalibratedEtlHParams. If set to
-  None default parameters are used.
-
-
-#### Returns:
-
-A `calibrated_etl_regressor` estimator.
-
-
-#### Raises:
-
-* `ValueError`: invalid parameters.
-* `KeyError`: type of feature not supported.
\ No newline at end of file
diff --git a/g3doc/api_docs/python/tensorflow_lattice/calibrated_lattice_classifier.md b/g3doc/api_docs/python/tensorflow_lattice/calibrated_lattice_classifier.md
deleted file mode 100644
index 6e0dda2..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/calibrated_lattice_classifier.md
+++ /dev/null
@@ -1,107 +0,0 @@
- -
-
-# tensorflow_lattice.calibrated_lattice_classifier
-
-``` python
-calibrated_lattice_classifier(
-    feature_columns=None,
-    model_dir=None,
-    quantiles_dir=None,
-    keypoints_initializers_fn=None,
-    optimizer=None,
-    config=None,
-    hparams=None
-)
-```
-
-Calibrated lattice binary classifier model.
-
-This model uses a piecewise linear calibration function on each of the
-real (as opposed to binary) inputs (parametrized) and then combines (sums up)
-the results. Optionally the calibration can be made monotonic.
-
-It usually requires a preprocessing step on the data to calculate the
-quantiles of each used feature. This can be done locally, or in one worker
-only, before training, in a separate invocation of your program (or directly).
-Typically the result is saved (`save_dir` parameter) to the same
-directory where the data is.
-
-Hyper-parameters are given in the form of the object
-tfl_hparams.CalibratedHParams. It takes in per-feature calibration
-parameters.
-
-Internally values will be converted to tf.float32.
-
-Example:
-
-```python
-def input_fn_train(): ...
-def input_fn_eval(): ...
-def input_fn_predict(): ...
-
-my_feature_columns=[...]
-
-# Have a separate program flag to generate the quantiles. Needs to be run
-# only once.
-if FLAGS.create_quantiles:
-  save_quantiles_for_keypoints(
-      input_fn=input_fn_train,
-      feature_columns=my_feature_columns,
-      save_dir=FLAGS.data_dir,
-      num_quantiles=1000,
-      override=True)
-  return  # Exit program.
-
-estimator = calibrated_lattice.calibrated_lattice_classifier(
-    feature_columns=feature_columns)
-estimator.train(input_fn=input_fn_train)
-estimator.evaluate(input_fn=input_fn_eval)
-estimator.predict(input_fn=input_fn_predict)
-```
-
-#### Args:
-
-* `feature_columns`: Optional, an iterable containing all the feature
-  columns used by the model. All items in the set should be instances of
-  classes derived from `FeatureColumn`. If not given, the model will
-  use as features the tensors returned by input_fn.
-  Supported types of columns: RealValuedColumn.
-* `model_dir`: Directory to save model parameters, graph, etc. This can
-  also be used to load checkpoints from the directory into an estimator to
-  continue training a previously saved model.
-* `quantiles_dir`: location where the quantiles for the data were saved.
-  Typically the same directory as the training data. These quantiles need
-  to be generated only once, with `save_quantiles_for_keypoints`, in a
-  separate invocation of your program. If you don't want to use quantiles,
-  you can set `keypoints_initializers_fn` instead.
-* `keypoints_initializers_fn`: if you know the distribution of your
-  input features you can provide that directly instead of `quantiles_dir`.
-  See `uniform_keypoints_for_signal`. It must be
-  a closure that, when called, returns a pair of tensors with keypoint
-  inputs and outputs to use for initialization. Alternatively it can be
-  given as a dict mapping feature name to keypoints_initializers_fn, so one
-  can have a different initialization per feature. It uses a closure instead
-  of the tensors themselves because the graph has to be created at the time
-  the model is built, which happens at a later time.
-* `optimizer`: string, `Optimizer` object, or callable that defines the
-  optimizer to use for training -- if a callable, it will be called with
-  learning_rate=hparams.learning_rate.
-* `config`: RunConfig object to configure the runtime settings. Typically set
-  to learn_runner.EstimatorConfig().
-* `hparams`: an instance of tfl_hparams.CalibratedHParams. If set to
-  None default parameters are used.
-
-
-#### Returns:
-
-A `CalibratedLatticeClassifier` estimator.
-
-
-#### Raises:
-
-* `ValueError`: invalid parameters.
-* `KeyError`: type of feature not supported.
\ No newline at end of file
diff --git a/g3doc/api_docs/python/tensorflow_lattice/calibrated_lattice_regressor.md b/g3doc/api_docs/python/tensorflow_lattice/calibrated_lattice_regressor.md
deleted file mode 100644
index df56ee7..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/calibrated_lattice_regressor.md
+++ /dev/null
@@ -1,108 +0,0 @@
- -
-
-# tensorflow_lattice.calibrated_lattice_regressor
-
-``` python
-calibrated_lattice_regressor(
-    feature_columns=None,
-    model_dir=None,
-    quantiles_dir=None,
-    keypoints_initializers_fn=None,
-    optimizer=None,
-    config=None,
-    hparams=None
-)
-```
-
-Calibrated lattice estimator (model) for regression.
-
-This model uses a piecewise linear calibration function on each of the
-inputs (parametrized) and then combines (sums up) the results. Optionally
-the calibration can be made monotonic.
-
-It usually requires a preprocessing step on the data to calculate the
-quantiles of each used feature. This can be done locally, or in one worker
-only, before training, in a separate invocation of your program (or directly).
-Typically the result is saved (`save_dir` parameter) to the same
-directory where the data is.
-
-Hyper-parameters are given in the form of the object
-tfl_hparams.CalibratedHParams. It takes in per-feature calibration
-parameters.
-
-Internally values will be converted to tf.float32.
-
-Example:
-
-```python
-def input_fn_train(): ...
-def input_fn_eval(): ...
-def input_fn_predict(): ...
-
-my_feature_columns=[...]
-
-# Have a separate program flag to generate the quantiles. Needs to be run
-# only once.
-if FLAGS.create_quantiles:
-  save_quantiles_for_keypoints(
-      input_fn=input_fn_train,
-      feature_columns=my_feature_columns,
-      save_dir=FLAGS.data_dir,
-      num_quantiles=1000,
-      override=True)
-  return  # Exit program.
-
-estimator = calibrated_lattice.calibrated_lattice_regressor(
-    feature_columns=feature_columns)
-estimator.train(input_fn=input_fn_train)
-estimator.evaluate(input_fn=input_fn_eval)
-estimator.predict(input_fn=input_fn_predict)
-```
-
-#### Args:
-
-* `feature_columns`: Optional, an iterable containing all the feature
-  columns used by the model. All items in the set should be instances of
-  classes derived from `FeatureColumn`. If not given, the model will
-  use as features the tensors returned by input_fn.
-  Supported types: RealValuedColumn.
-* `model_dir`: Directory to save model parameters, graph, etc. This can
-  also be used to load checkpoints from the directory into an estimator to
-  continue training a previously saved model.
-* `quantiles_dir`: location where the quantiles for the data were saved.
-  Typically the same directory as the training data. These quantiles need
-  to be generated only once, with `save_quantiles_for_keypoints`, in a
-  separate invocation of your program. If you don't want to use quantiles,
-  you can set `keypoints_initializers_fn` instead.
-* `keypoints_initializers_fn`: if you know the distribution of your
-  input features you can provide that directly instead of `quantiles_dir`.
-  See `uniform_keypoints_for_signal`. It must be
-  a closure that, when called, returns a pair of tensors with keypoint
-  inputs and outputs to use for initialization. Alternatively it can be
-  given as a dict mapping feature name to keypoints_initializers_fn, so one
-  can have a different initialization per feature. It uses a closure instead
-  of the tensors themselves because the graph has to be created at the time
-  the model is built, which happens at a later time.
-* `optimizer`: string, `Optimizer` object, or callable that defines the
-  optimizer to use for training -- if a callable, it will be called with
-  learning_rate=hparams.learning_rate.
-* `config`: RunConfig object to configure the runtime settings. Typically set
-  to learn_runner.EstimatorConfig().
-* `hparams`: an instance of tfl_hparams.CalibratedHParams. If set to
-  None default parameters are used.
-
-
-#### Returns:
-
-A `CalibratedLatticeRegressor` estimator.
-
-
-#### Raises:
-
-* `ValueError`: invalid parameters.
-* `KeyError`: type of feature not supported.
\ No newline at end of file
diff --git a/g3doc/api_docs/python/tensorflow_lattice/calibrated_linear_classifier.md b/g3doc/api_docs/python/tensorflow_lattice/calibrated_linear_classifier.md
deleted file mode 100644
index 49c3f66..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/calibrated_linear_classifier.md
+++ /dev/null
@@ -1,111 +0,0 @@
- -
-
-# tensorflow_lattice.calibrated_linear_classifier
-
-``` python
-calibrated_linear_classifier(
-    feature_columns=None,
-    model_dir=None,
-    quantiles_dir=None,
-    keypoints_initializers_fn=None,
-    optimizer=None,
-    config=None,
-    hparams=None
-)
-```
-
-Calibrated linear binary classifier model.
-
-This model uses a piecewise linear calibration function on each of the
-real (as opposed to binary) inputs (parametrized) and then combines (sums up)
-the results. Optionally the calibration can be made monotonic.
-
-It usually requires a preprocessing step on the data to calculate the
-quantiles of each used feature. This can be done locally, or in one worker
-only, before training, in a separate invocation of your program (or directly).
-Typically the result is saved (`save_dir` parameter) to the same
-directory where the data is.
-
-Hyper-parameters are given in the form of the object
-tfl_hparams.CalibratedHParams. It takes in per-feature calibration
-parameters.
-
-Internally values will be converted to tf.float32.
-
-Example:
-
-```python
-def input_fn_train(): ...
-def input_fn_eval(): ...
-def input_fn_predict(): ...
-
-my_feature_columns=[...]
-
-# Have a separate program flag to generate the quantiles. Needs to be run
-# only once.
-if FLAGS.create_quantiles:
-  save_quantiles_for_keypoints(
-      input_fn=input_fn_train,
-      feature_columns=my_feature_columns,
-      save_dir=FLAGS.data_dir,
-      num_quantiles=1000,
-      override=True)
-  return  # Exit program.
-
-estimator = calibrated_linear.calibrated_linear_classifier(
-    feature_columns=feature_columns)
-estimator.train(input_fn=input_fn_train)
-estimator.evaluate(input_fn=input_fn_eval)
-estimator.predict(input_fn=input_fn_predict)
-```
-
-#### Args:
-
-* `feature_columns`: Optional, an iterable containing all the feature
-  columns used by the model. All items in the set should be instances of
-  classes derived from `FeatureColumn`. If not given, the model will
-  use as features the tensors returned by input_fn.
-  Supported types of columns: RealValuedColumn.
-* `model_dir`: Directory to save model parameters, graph, etc. This can
-  also be used to load checkpoints from the directory into an estimator to
-  continue training a previously saved model.
-* `quantiles_dir`: location where the quantiles for the data were saved.
-  Typically the same directory as the training data. These quantiles need
-  to be generated only once, with `save_quantiles_for_keypoints`, in a
-  separate invocation of your program. If you don't want to use quantiles,
-  you can set `keypoints_initializers_fn` instead.
-* `keypoints_initializers_fn`: if you know the distribution of your
-  input features you can provide that directly instead of `quantiles_dir`.
-  See `uniform_keypoints_for_signal`. It must be
-  a closure that returns a pair of tensors with keypoint inputs and
-  outputs to use for initialization (must match `num_keypoints` configured
-  in `hparams`). Alternatively the closure can return a dict mapping
-  feature name to pairs for initialization per feature. If `quantiles_dir`
-  and `keypoints_initializers_fn` are both set, the latter takes precedence,
-  and the features for which `keypoints_initializers` are not defined
-  fall back to using the quantiles found in `quantiles_dir`. It uses a
-  closure instead of the tensors themselves because the graph has to be
-  created at the time the model is built, which happens at a later time.
-* `optimizer`: string, `Optimizer` object, or callable that defines the
-  optimizer to use for training -- if a callable, it will be called with
-  learning_rate=hparams.learning_rate.
-* `config`: RunConfig object to configure the runtime settings. Typically set
-  to learn_runner.EstimatorConfig().
-* `hparams`: an instance of tfl_hparams.CalibratedHParams. If set to
-  None default parameters are used.
-
-
-#### Returns:
-
-A `CalibratedLinearClassifier` estimator.
-
-
-#### Raises:
-
-* `ValueError`: invalid parameters.
-* `KeyError`: type of feature not supported.
\ No newline at end of file
diff --git a/g3doc/api_docs/python/tensorflow_lattice/calibrated_linear_regressor.md b/g3doc/api_docs/python/tensorflow_lattice/calibrated_linear_regressor.md
deleted file mode 100644
index be5a622..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/calibrated_linear_regressor.md
+++ /dev/null
@@ -1,112 +0,0 @@
- -
-
-# tensorflow_lattice.calibrated_linear_regressor
-
-``` python
-calibrated_linear_regressor(
-    feature_columns=None,
-    model_dir=None,
-    quantiles_dir=None,
-    keypoints_initializers_fn=None,
-    optimizer=None,
-    config=None,
-    hparams=None
-)
-```
-
-Calibrated linear estimator (model) for regression.
-
-This model uses a piecewise linear calibration function on each of the
-inputs (parametrized) and then combines (sums up) the results. Optionally
-the calibration can be made monotonic.
-
-It usually requires a preprocessing step on the data to calculate the
-quantiles of each used feature. This can be done locally, or in one worker
-only, before training, in a separate invocation of your program (or directly).
-Typically the result is saved (`save_dir` parameter) to the same
-directory where the data is.
-
-Hyper-parameters are given in the form of the object
-tfl_hparams.CalibratedHParams. It takes in per-feature calibration
-parameters.
-
-Internally values will be converted to tf.float32.
-
-Example:
-
-```python
-def input_fn_train(): ...
-def input_fn_eval(): ...
-def input_fn_predict(): ...
-
-my_feature_columns=[...]
-
-# Have a separate program flag to generate the quantiles. Needs to be run
-# only once.
-if FLAGS.create_quantiles:
-  save_quantiles_for_keypoints(
-      input_fn=input_fn_train,
-      feature_columns=my_feature_columns,
-      save_dir=FLAGS.data_dir,
-      num_quantiles=1000,
-      override=True)
-  return  # Exit program.
-
-estimator = calibrated_linear.calibrated_linear_regressor(
-    feature_columns=feature_columns)
-estimator.train(input_fn=input_fn_train)
-estimator.evaluate(input_fn=input_fn_eval)
-estimator.predict(input_fn=input_fn_predict)
-```
-
-#### Args:
-
-* `feature_columns`: Optional, an iterable containing all the feature
-  columns used by the model. All items in the set should be instances of
-  classes derived from `FeatureColumn`. If not given, the model will
-  use as features the tensors returned by input_fn.
-  Supported types: RealValuedColumn.
-* `model_dir`: Directory to save model parameters, graph, etc. This can
-  also be used to load checkpoints from the directory into an estimator to
-  continue training a previously saved model.
-* `quantiles_dir`: location where the quantiles for the data were saved.
-  Typically the same directory as the training data. These quantiles need
-  to be generated only once, with `save_quantiles_for_keypoints`, in a
-  separate invocation of your program. If you don't want to use quantiles,
-  you can set `keypoints_initializers_fn` instead.
-* `keypoints_initializers_fn`: if you know the distribution of your
-  input features you can provide that directly instead of `quantiles_dir`.
-  See `uniform_keypoints_for_signal`. It must be
-  a closure that returns a pair of tensors with keypoint inputs and
-  outputs to use for initialization (must match `num_keypoints` configured
-  in `hparams`). Alternatively the closure can return a dict mapping
-  feature name to pairs for initialization per feature. If `quantiles_dir`
-  and `keypoints_initializers_fn` are both set, the latter takes precedence,
-  and the features for which `keypoints_initializers` are not defined
-  fall back to using the quantiles found in `quantiles_dir`. It uses a
-  closure instead of the tensors themselves because the graph has to be
-  created at the time the model is built, which happens at a later time.
-* `optimizer`: string, `Optimizer` object, or callable that defines the
-  optimizer to use for training -- if a callable, it will be called with
-  learning_rate=hparams.learning_rate.
-* `config`: RunConfig object to configure the runtime settings. Typically set
-  to learn_runner.EstimatorConfig().
-* `hparams`: an instance of tfl_hparams.CalibratedHParams. If set to
-  None default parameters are used.
-
-
-#### Returns:
-
-A `CalibratedLinearRegressor` estimator.
-
-
-#### Raises:
-
-* `ValueError`: invalid parameters.
-* `KeyError`: type of feature not supported.
\ No newline at end of file
diff --git a/g3doc/api_docs/python/tensorflow_lattice/calibrated_rtl_classifier.md b/g3doc/api_docs/python/tensorflow_lattice/calibrated_rtl_classifier.md
deleted file mode 100644
index 1e3927f..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/calibrated_rtl_classifier.md
+++ /dev/null
@@ -1,114 +0,0 @@
- -
-
-# tensorflow_lattice.calibrated_rtl_classifier
-
-``` python
-calibrated_rtl_classifier(
-    feature_columns=None,
-    model_dir=None,
-    quantiles_dir=None,
-    keypoints_initializers_fn=None,
-    optimizer=None,
-    config=None,
-    hparams=None
-)
-```
-
-Calibrated RTL binary classifier model.
-
-This model uses a piecewise linear calibration function on each of the
-inputs (parametrized) and then feeds them to an ensemble of random lattices.
-num_lattices and lattice_rank (the number of inputs to each lattice) must be
-specified in the hyperparameters. Optionally the calibration can be made
-monotonic.
-
-It usually requires a preprocessing step on the data to calculate the
-quantiles of each used feature. This can be done locally, or in one worker
-only, before training, in a separate invocation of your program (or directly).
-Typically the result is saved (`save_dir` parameter) to the same
-directory where the data is.
-
-Hyper-parameters are given in the form of the object
-tfl_hparams.CalibratedRtlHParams. lattice_rank and num_lattices must
-be specified; they have no default values. It also takes in
-per-feature parameters.
-
-Internally values will be converted to tf.float32.
-
-Example:
-
-```python
-def input_fn_train(): ...
-def input_fn_eval(): ...
-def input_fn_predict(): ...
-
-my_feature_columns=[...]
-
-# Have a separate program flag to generate the quantiles. Needs to be run
-# only once.
-if FLAGS.create_quantiles:
-  save_quantiles_for_keypoints(
-      input_fn=input_fn_train,
-      feature_columns=my_feature_columns,
-      save_dir=FLAGS.data_dir,
-      num_quantiles=1000,
-      override=True)
-  return  # Exit program.
-
-hparams = tfl_hparams.CalibratedRtlHParams(num_lattices=10, lattice_rank=2)
-estimator = calibrated_rtl.calibrated_rtl_classifier(
-    feature_columns=feature_columns, hparams=hparams)
-estimator.train(input_fn=input_fn_train)
-estimator.evaluate(input_fn=input_fn_eval)
-estimator.predict(input_fn=input_fn_predict)
-```
-
-#### Args:
-
-* `feature_columns`: Optional, an iterable containing all the feature
-  columns used by the model. All items in the set should be instances of
-  classes derived from `FeatureColumn`. If not given, the model will
-  use as features the tensors returned by input_fn.
-  Supported types of columns: RealValuedColumn.
-* `model_dir`: Directory to save model parameters, graphs, etc. This can
-  also be used to load checkpoints from the directory into an estimator to
-  continue training a previously saved model.
-* `quantiles_dir`: location where the quantiles for the data were saved.
-  Typically the same directory as the training data. These quantiles need
-  to be generated only once, with `save_quantiles_for_keypoints`, in a
-  separate invocation of your program. If you don't want to use quantiles,
-  you can set `keypoints_initializers_fn` instead.
-* `keypoints_initializers_fn`: if you know the distribution of your
-  input features you can provide that directly instead of `quantiles_dir`.
-  See `uniform_keypoints_for_signal`. It must be
-  a closure that returns a pair of tensors with keypoint inputs and
-  outputs to use for initialization (must match `num_keypoints` configured
-  in `hparams`). Alternatively the closure can return a dict mapping
-  feature name to pairs for initialization per feature. If `quantiles_dir`
-  and `keypoints_initializers_fn` are both set, the latter takes precedence,
-  and the features for which `keypoints_initializers` are not defined
-  fall back to using the quantiles found in `quantiles_dir`. It uses a
-  closure instead of the tensors themselves because the graph has to be
-  created at the time the model is built, which happens at a later time.
-* `optimizer`: string, `Optimizer` object, or callable that defines the
-  optimizer to use for training -- if a callable, it will be called with
-  learning_rate=hparams.learning_rate.
-* `config`: RunConfig object to configure the runtime settings. Typically set
-  to learn_runner.EstimatorConfig().
-* `hparams`: an instance of tfl_hparams.CalibratedRtlHParams. If set to
-  None default parameters are used.
-
-
-#### Returns:
-
-A `calibrated_rtl_classifier` estimator.
-
-
-#### Raises:
-
-* `ValueError`: invalid parameters.
-* `KeyError`: type of feature not supported.
\ No newline at end of file
diff --git a/g3doc/api_docs/python/tensorflow_lattice/calibrated_rtl_regressor.md b/g3doc/api_docs/python/tensorflow_lattice/calibrated_rtl_regressor.md
deleted file mode 100644
index 9365747..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/calibrated_rtl_regressor.md
+++ /dev/null
@@ -1,112 +0,0 @@
- -
-
-# tensorflow_lattice.calibrated_rtl_regressor
-
-``` python
-calibrated_rtl_regressor(
-    feature_columns=None,
-    model_dir=None,
-    quantiles_dir=None,
-    keypoints_initializers_fn=None,
-    optimizer=None,
-    config=None,
-    hparams=None
-)
-```
-
-Calibrated RTL regressor model.
-
-This model uses a piecewise linear calibration function on each of the
-inputs (parametrized) and then feeds them to an ensemble of random lattices.
-num_lattices and lattice_rank (the number of inputs to each lattice) must be
-specified in the hyperparameters. Optionally the calibration can be made
-monotonic.
-
-It usually requires a preprocessing step on the data to calculate the
-quantiles of each used feature. This can be done locally, or in one worker
-only, before training, in a separate invocation of your program (or directly).
-Typically the result is saved (`save_dir` parameter) to the same
-directory where the data is.
-
-Hyper-parameters are given in the form of the object
-tfl_hparams.CalibratedRtlHParams. lattice_rank and num_lattices must
-be specified; they have no default values. It also takes in
-per-feature parameters.
-
-Internally values will be converted to tf.float32.
-
-Example:
-
-```python
-def input_fn_train(): ...
-def input_fn_eval(): ...
-def input_fn_predict(): ...
-
-my_feature_columns=[...]
-
-# Have a separate program flag to generate the quantiles. Needs to be run
-# only once.
-if FLAGS.create_quantiles:
-  save_quantiles_for_keypoints(
-      input_fn=input_fn_train,
-      feature_columns=my_feature_columns,
-      save_dir=FLAGS.data_dir,
-      num_quantiles=1000,
-      override=True)
-  return  # Exit program.
-
-hparams = tfl_hparams.CalibratedRtlHParams(num_lattices=10, lattice_rank=2)
-estimator = calibrated_rtl.calibrated_rtl_regressor(
-    feature_columns=feature_columns, hparams=hparams)
-estimator.train(input_fn=input_fn_train)
-estimator.evaluate(input_fn=input_fn_eval)
-estimator.predict(input_fn=input_fn_predict)
-```
-
-#### Args:
-
-* `feature_columns`: Optional, an iterable containing all the feature
-  columns used by the model. All items in the set should be instances of
-  classes derived from `FeatureColumn`. If not given, the model will
-  use as features the tensors returned by input_fn.
-  Supported types of columns: RealValuedColumn.
-* `model_dir`: Directory to save model parameters, graphs, etc. This can
-  also be used to load checkpoints from the directory into an estimator to
-  continue training a previously saved model.
-* `quantiles_dir`: location where the quantiles for the data were saved.
-  Typically the same directory as the training data. These quantiles need
-  to be generated only once, with `save_quantiles_for_keypoints`, in a
-  separate invocation of your program. If you don't want to use quantiles,
-  you can set `keypoints_initializers_fn` instead.
-* `keypoints_initializers_fn`: if you know the distribution of your
-  input features you can provide that directly instead of `quantiles_dir`.
-  See `uniform_keypoints_for_signal`. It must be
-  a closure that returns a pair of tensors with keypoint inputs and
-  outputs to use for initialization (must match `num_keypoints` configured
-  in `hparams`). Alternatively the closure can return a dict mapping
-  feature name to pairs for initialization per feature. If `quantiles_dir`
-  and `keypoints_initializers_fn` are both set, the latter takes precedence,
-  and the features for which `keypoints_initializers` are not defined
-  fall back to using the quantiles found in `quantiles_dir`. It uses a
-  closure instead of the tensors themselves because the graph has to be
-  created at the time the model is built, which happens at a later time.
-* `optimizer`: string, `Optimizer` object, or callable that defines the
-  optimizer to use for training -- if a callable, it will be called with
-  learning_rate=hparams.learning_rate.
-* `config`: RunConfig object to configure the runtime settings. Typically set
-  to learn_runner.EstimatorConfig().
-* `hparams`: an instance of tfl_hparams.CalibratedRtlHParams. If set to
-  None default parameters are used.
-
-
-#### Returns:
-
-A `calibrated_rtl_regressor` estimator.
-
-
-#### Raises:
-
-* `ValueError`: invalid parameters.
-* `KeyError`: type of feature not supported.
\ No newline at end of file
diff --git a/g3doc/api_docs/python/tensorflow_lattice/calibration_layer.md b/g3doc/api_docs/python/tensorflow_lattice/calibration_layer.md
deleted file mode 100644
index b5b65c8..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/calibration_layer.md
+++ /dev/null
@@ -1,96 +0,0 @@
- -
-
-# tensorflow_lattice.calibration_layer
-
-``` python
-calibration_layer(
-    uncalibrated_tensor,
-    num_keypoints,
-    keypoints_initializers=None,
-    keypoints_initializer_fns=None,
-    bound=False,
-    monotonic=None,
-    missing_input_values=None,
-    missing_output_values=None,
-    l1_reg=None,
-    l2_reg=None,
-    l1_laplacian_reg=None,
-    l2_laplacian_reg=None,
-    name=None
-)
-```
-
-Creates a calibration layer for uncalibrated values.
-
-Returns a calibrated tensor of the same shape as the uncalibrated continuous
-signals passed in, and a list of projection ops that must be applied at
-each step (or every so many steps) to project the model to a feasible space,
-used for bounding the outputs or for imposing monotonicity -- the list will be
-empty if bound and monotonic are not set.
-
-#### Args:
-
-* `uncalibrated_tensor`: Tensor of shape [batch_size, ...] with uncalibrated
-  values.
-* `num_keypoints`: Number of keypoints to use. Either a scalar value that
-  will be used for every uncalibrated signal, or a list of n values, one
-  per uncalibrated signal -- uncalibrated is first flattened (see
-  tf.contrib.layers.flatten) to [batch_size, n], and there should be one
-  value in the list per n. If a value of the list is 0 or None the
-  corresponding signal won't be calibrated.
-* `keypoints_initializers`: For evaluation or inference (or when resuming
-  training from a checkpoint) the values will be loaded from disk, so they
-  don't need to be given (leave it as None).
-  Otherwise provide either a tuple of two tensors of shape [num_keypoints],
-  or a list of n pairs of tensors, each of shape [num_keypoints]. In this
-  list there should be one pair per uncalibrated signal, just like
-  num_keypoints above. Notice that num_keypoints can be different per
-  signal.
-* `keypoints_initializer_fns`: Like keypoints_initializers, but using lambda
-  initializers. They should be compatible with tf.get_variable. If this is
-  set, then keypoints_initializers must be None.
-* `bound`: boolean, whether the output of the calibration must be bounded.
-  Alternatively a list of n booleans, one per uncalibrated value, like
-  num_keypoints above.
-* `monotonic`: whether the calibration is monotonic: None or 0 means no
-  monotonicity. Positive or negative values mean increasing or decreasing
-  monotonicity respectively. Alternatively a list of n monotonicity values,
-  one per uncalibrated value, like num_keypoints above.
-* `missing_input_values`: If set, and if the input has this value, it is
-  assumed to be missing and the output will either be calibrated to some
-  value between `[calibration_output_min, calibration_output_max]` or set
-  to a fixed value given by missing_output_values. Limitation: it only
-  works for scalars. Either one value for all inputs, or a list with one
-  value per uncalibrated value.
-* `missing_output_values`: Requires missing_input_values also to be set. If
-  set, it will convert missing inputs to this value. Either one value for
-  all outputs, or a list with one value per uncalibrated value.
-* `l1_reg`: (list of floats or float) l1 regularization amount.
-  If float, then the same value is applied to all dimensions.
-* `l2_reg`: (list of floats or float) l2 regularization amount.
-  If float, then the same value is applied to all dimensions.
-* `l1_laplacian_reg`: (list of floats or float) l1 Laplacian
-  regularization amount. If float, then the same value is applied to all
-  dimensions.
-* `l2_laplacian_reg`: (list of floats or float) l2 Laplacian
-  regularization amount. If float, then the same value is applied to all
-  dimensions.
-* `name`: Name scope for operations.
-
-
-#### Returns:
-
-A tuple of:
-* calibrated tensor of shape [batch_size, ...], the same shape as
-  uncalibrated.
-* list of projection ops, that must be applied at each step (or every so
-  many steps) to project the model to a feasible space: used for bounding
-  the outputs or for imposing monotonicity. Empty if none are requested.
-* None or tensor with regularization loss.
-
-
-#### Raises:
-
-* `ValueError`: If dimensions don't match.
\ No newline at end of file
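
[Editor's note] A minimal usage sketch (TF 1.x graph mode), based only on the signature documented above; the shapes, keypoint values, and session loop are illustrative assumptions, not from the original docs:

```python
import tensorflow as tf
import tensorflow_lattice as tfl

# Two uncalibrated input signals per example.
uncalibrated = tf.placeholder(tf.float32, shape=(None, 2))

# One (input keypoints, output keypoints) pair per signal.
kp_in = tf.constant([0.0, 0.5, 1.0])
kp_out = tf.constant([0.0, 0.8, 1.0])

calibrated, projections, reg = tfl.calibration_layer(
    uncalibrated,
    num_keypoints=3,
    keypoints_initializers=[(kp_in, kp_out), (kp_in, kp_out)],
    bound=True,       # keep outputs within the keypoint output range
    monotonic=+1)     # increasing monotonicity for all signals

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(calibrated, feed_dict={uncalibrated: [[0.25, 0.75]]}))
  # After each training step, apply the returned projection ops:
  sess.run(projections)
```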
diff --git a/g3doc/api_docs/python/tensorflow_lattice/calibrator_regularization.md b/g3doc/api_docs/python/tensorflow_lattice/calibrator_regularization.md
deleted file mode 100644
index 67f64bd..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/calibrator_regularization.md
+++ /dev/null
@@ -1,39 +0,0 @@
-
-# tensorflow_lattice.calibrator_regularization
-
-``` python
-calibrator_regularization(
-    output_keypoints,
-    l1_reg=None,
-    l2_reg=None,
-    l1_laplacian_reg=None,
-    l2_laplacian_reg=None,
-    name='calibrator_regularization'
-)
-```
-
-Returns a calibrator regularization op.
-
-#### Args:
-
-* `output_keypoints`: (Rank-1 tensor with shape [num_keypoints]) The 1d
-  calibrator's output keypoints tensor.
-* `l1_reg`: (float) l1 regularization amount.
-* `l2_reg`: (float) l2 regularization amount.
-* `l1_laplacian_reg`: (float) l1 Laplacian regularization amount.
-* `l2_laplacian_reg`: (float) l2 Laplacian regularization amount.
-* `name`: name scope of the calibrator regularization.
-
-
-#### Returns:
-
-Rank-0 tensor (scalar) that contains the calibrator regularization.
-
-
-#### Raises:
-
-* `ValueError`: if output_keypoints is not a rank-1 tensor, or if its shape
-  is unknown.
\ No newline at end of file
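
[Editor's note] A short sketch assuming the signature above; the keypoint values and regularization amounts are illustrative:

```python
import tensorflow as tf
import tensorflow_lattice as tfl

# Rank-1 output keypoints of a 1d calibrator, shape [num_keypoints].
output_keypoints = tf.Variable([0.0, 0.4, 0.5, 1.1])

# Scalar regularization tensor that can be added to the training loss.
reg = tfl.calibrator_regularization(
    output_keypoints, l1_reg=0.01, l2_laplacian_reg=0.1)
```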
diff --git a/g3doc/api_docs/python/tensorflow_lattice/ensemble_lattices_layer.md b/g3doc/api_docs/python/tensorflow_lattice/ensemble_lattices_layer.md
deleted file mode 100644
index 836943f..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/ensemble_lattices_layer.md
+++ /dev/null
@@ -1,69 +0,0 @@
-
-# tensorflow_lattice.ensemble_lattices_layer
-
-``` python
-ensemble_lattices_layer(
-    input_tensor,
-    lattice_sizes,
-    structure_indices,
-    is_monotone=None,
-    output_dim=1,
-    interpolation_type='hypercube',
-    lattice_initializers=None,
-    l1_reg=None,
-    l2_reg=None,
-    l1_torsion_reg=None,
-    l2_torsion_reg=None,
-    l1_laplacian_reg=None,
-    l2_laplacian_reg=None
-)
-```
-
-Creates an ensemble of lattices layer.
-
-Returns a list of lattice outputs, the lattice parameters, and projection ops.
-
-#### Args:
-
-* `input_tensor`: [batch_size, input_dim] tensor.
-* `lattice_sizes`: A list of lattice sizes of each dimension.
-* `structure_indices`: A list of lists of ints. structure_indices[k] is the
-  list of input indices that belong to the kth lattice.
-* `is_monotone`: A list of input_dim booleans, a boolean, or None. If None or
-  False, the lattice will not have monotonicity constraints. If
-  is_monotone[k] == True, then the lattice output is non-decreasing
-  with respect to input_tensor[?, k] (the kth coordinate). If
-  True, all input coordinates get the non-decreasing monotonicity.
-* `output_dim`: Number of outputs.
-* `interpolation_type`: 'hypercube' or 'simplex'.
-* `lattice_initializers`: (Optional) A list of initializers, one for each
-  lattice's parameter vector. lattice_initializers[k] is a 2D tensor
-  [output_dim, parameter_dim[k]], where parameter_dim[k] is the number of
-  parameters in the kth lattice. If None, the lattice_param_as_linear
-  initializer will be used with
-  linear_weights=[1 if monotone else 0 for monotone in is_monotone].
-* `l1_reg`: (float) l1 regularization amount.
-* `l2_reg`: (float) l2 regularization amount.
-* `l1_torsion_reg`: (float) l1 torsion regularization amount.
-* `l2_torsion_reg`: (float) l2 torsion regularization amount.
-* `l1_laplacian_reg`: (list of floats or float) list of L1 Laplacian
-  regularization amounts, one per dimension. If a single float value is
-  provided, then all dimensions get the same value.
-* `l2_laplacian_reg`: (list of floats or float) list of L2 Laplacian
-  regularization amounts, one per dimension. If a single float value is
-  provided, then all dimensions get the same value.
-
-
-#### Returns:
-
-A tuple of:
-* a list of output tensors, [batch_size, output_dim], with length
-  len(structure_indices), i.e., one for each lattice.
-* a list of parameter tensors of shape [output_dim, parameter_dim].
-* None or projection ops, that must be applied at each
-  step (or every so many steps) to project the model to a feasible space:
-  used for bounding the outputs or for imposing monotonicity.
-* None or a regularization loss, if regularization is configured.
\ No newline at end of file
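
[Editor's note] A sketch of a two-lattice ensemble over three inputs, assuming the signature above; the sizes and structure are illustrative:

```python
import tensorflow as tf
import tensorflow_lattice as tfl

inputs = tf.placeholder(tf.float32, shape=(None, 3))

outputs, params, projections, reg = tfl.ensemble_lattices_layer(
    inputs,
    lattice_sizes=[2, 2, 2],
    structure_indices=[[0, 1], [1, 2]],  # lattice 0 sees inputs 0,1; lattice 1 sees 1,2
    is_monotone=[True, False, True],
    output_dim=1)

# One common way to combine the per-lattice outputs is to sum them.
prediction = tf.add_n(outputs)
```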
diff --git a/g3doc/api_docs/python/tensorflow_lattice/input_calibration_layer.md b/g3doc/api_docs/python/tensorflow_lattice/input_calibration_layer.md
deleted file mode 100644
index 633d3bf..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/input_calibration_layer.md
+++ /dev/null
@@ -1,102 +0,0 @@
-
-# tensorflow_lattice.input_calibration_layer
-
-``` python
-input_calibration_layer(
-    columns_to_tensors,
-    num_keypoints,
-    feature_columns=None,
-    keypoints_initializers=None,
-    keypoints_initializer_fns=None,
-    bound=False,
-    monotonic=None,
-    missing_input_values=None,
-    missing_output_values=None,
-    l1_reg=None,
-    l2_reg=None,
-    l1_laplacian_reg=None,
-    l2_laplacian_reg=None,
-    dtype=dtypes.float32
-)
-```
-
-Creates a calibration layer for the given input and feature_columns.
-
-Returns a tensor with the calibrated values of the given features, a list
-of the names of the features in the order they appear in the returned tensor,
-and a list of projection ops that must be applied at each step (or every so
-many steps) to project the model to a feasible space, used for bounding the
-outputs or for imposing monotonicity -- the list will be empty if bound and
-monotonic are not set.
-
-#### Args:
-
-* `columns_to_tensors`: A mapping from feature name to tensors. A 'string'
-  key means a base (not-transformed) feature. If feature_columns is not set
-  these are the features calibrated; otherwise the transformed
-  feature_columns are the ones calibrated.
-* `num_keypoints`: Number of keypoints to use. Either a single int, or a dict
-  mapping feature names to num_keypoints. If a value of the dict is 0 or
-  None the corresponding feature won't be calibrated.
-* `feature_columns`: Optional. If set to a set of FeatureColumns, these will
-  be the features used and calibrated.
-* `keypoints_initializers`: For evaluation or inference (or when resuming
-  training from a checkpoint) the values will be loaded from disk, so they
-  don't need to be given (leave it as None).
-  Either a tuple of two tensors of shape [num_keypoints], or a dict mapping
-  feature names to pairs of tensors of shape [num_keypoints[feature_name]].
-  See load_keypoints_from_quantiles or uniform_keypoints_for_signal on how
-  to generate these (module keypoints_initialization).
-* `keypoints_initializer_fns`: Like keypoints_initializers, but using lambda
-  initializers. They should be compatible with tf.get_variable. If this is
-  set, then keypoints_initializers must be None.
-* `bound`: boolean, whether the output of the calibration must be bounded.
-  Alternatively a dict mapping feature name to boundedness.
-* `monotonic`: whether the calibration has to be kept monotonic: None or 0
-  means no monotonicity. Positive or negative values mean increasing or
-  decreasing monotonicity respectively. Alternatively a dict mapping
-  feature name to monotonicity.
-* `missing_input_values`: If set, and if the input has this value, it is
-  assumed to be missing and the output will either be calibrated to some
-  value between `[calibration_output_min, calibration_output_max]` or set
-  to a fixed value given by missing_output_values. Limitation: it only
-  works for scalars. Either one value for all inputs, or a dict mapping
-  feature name to the missing input value for the respective feature.
-* `missing_output_values`: Requires missing_input_values also to be set. If
-  set, it will convert missing inputs to this value. Either one value for
-  all inputs, or a dict mapping feature name to the missing output value
-  for the respective feature.
-* `l1_reg`: ({feature_name: float} dict or float) l1 regularization amount.
-  If float, then the same value is applied to all features.
-* `l2_reg`: ({feature_name: float} dict or float) l2 regularization amount.
-  If float, then the same value is applied to all features.
-* `l1_laplacian_reg`: ({feature_name: float} dict or float) l1 Laplacian
-  regularization amount. If float, then the same value is applied to all
-  features.
-* `l2_laplacian_reg`: ({feature_name: float} dict or float) l2 Laplacian
-  regularization amount. If float, then the same value is applied to all
-  features.
-* `dtype`: If any of the scalars are not given as tensors, they are converted
-  to tensors with this dtype.
-
-
-#### Returns:
-
-A tuple of:
-* calibrated tensor of shape [batch_size, sum(features dimensions)].
-* list of the feature names in the order they appear in the calibrated
-  tensor. A name may appear more than once if the feature is
-  multi-dimensional (for instance a multi-dimensional embedding).
-* list of projection ops, that must be applied at each step (or every so
-  many steps) to project the model to a feasible space: used for bounding
-  the outputs or for imposing monotonicity. Empty if none are requested.
-* None or tensor with regularization loss.
-
-
-#### Raises:
-
-* `ValueError`: if dtypes are incompatible.
-
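
[Editor's note] A minimal sketch assuming the signature above; the feature names, tensors, and keypoint values are illustrative:

```python
import tensorflow as tf
import tensorflow_lattice as tfl

columns_to_tensors = {
    'price': tf.constant([[1.0], [2.5]]),
    'distance': tf.constant([[0.3], [0.9]]),
}
kp_in = tf.constant([0.0, 1.0, 3.0])
kp_out = tf.constant([0.0, 0.5, 1.0])

calibrated, feature_names, projections, reg = tfl.input_calibration_layer(
    columns_to_tensors,
    num_keypoints={'price': 3, 'distance': 3},
    keypoints_initializers={'price': (kp_in, kp_out),
                            'distance': (kp_in, kp_out)},
    monotonic={'price': +1, 'distance': -1})  # increasing / decreasing
```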
diff --git a/g3doc/api_docs/python/tensorflow_lattice/input_calibration_layer_from_hparams.md b/g3doc/api_docs/python/tensorflow_lattice/input_calibration_layer_from_hparams.md
deleted file mode 100644
index 770b2a1..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/input_calibration_layer_from_hparams.md
+++ /dev/null
@@ -1,76 +0,0 @@
-
-# tensorflow_lattice.input_calibration_layer_from_hparams
-
-``` python
-input_calibration_layer_from_hparams(
-    columns_to_tensors,
-    feature_columns,
-    hparams,
-    quantiles_dir=None,
-    keypoints_initializers=None,
-    name=None,
-    dtype=dtypes.float32
-)
-```
-
-Creates a calibration layer for the input using hyper-parameters.
-
-Similar to `input_calibration_layer` but reads its parameters from a
-`CalibratedHParams` object.
-
-#### Args:
-
-* `columns_to_tensors`: A mapping from feature name to tensors. A 'string'
-  key means a base (not-transformed) feature. If feature_columns is not set
-  these are the features calibrated; otherwise the transformed
-  feature_columns are the ones calibrated.
-* `feature_columns`: An iterable containing all the feature columns used by
-  the model. Optional, if not set the model will use all features given in
-  columns_to_tensors. All items in the set should be instances of
-  classes derived from `FeatureColumn`.
-* `hparams`: Hyper-parameters, need to inherit from `CalibratedHParams`.
-  It is also changed to include all feature names found in
-  `feature_columns`. See `CalibratedHParams` and `input_calibration_layer`
-  for descriptions of how these hyper-parameters work.
-* `quantiles_dir`: location where the quantiles for the data were saved.
-  Typically the same directory as the training data. These quantiles can be
-  generated with `save_quantiles_for_keypoints`, maybe in a separate
-  invocation of your program. Different models can share the same quantiles
-  information, so this needs to be generated only once when
-  hyper-parameter tuning. If you don't want to use quantiles, you
-  can set `keypoints_initializers` instead.
-* `keypoints_initializers`: if you know the distribution of your
-  input features you can provide that directly instead of `quantiles_dir`.
-  See `uniform_keypoints_for_signal`. It must be
-  a pair of tensors with keypoint inputs and outputs to use for
-  initialization (must match `num_keypoints` configured in `hparams`).
-  Alternatively it can be given as a dict mapping feature name to pairs,
-  for initialization per feature. If `quantiles_dir` and
-  `keypoints_initializers` are both set, the latter takes precedence, and
-  the features for which `keypoints_initializers` are not defined fall back
-  to using the quantiles found in `quantiles_dir`.
-* `name`: Name scope for the layer.
-* `dtype`: If any of the scalars are not given as tensors, they are converted
-  to tensors with this dtype.
-
-
-#### Returns:
-
-A tuple of:
-* calibrated tensor of shape [batch_size, sum(features dimensions)].
-* list of the feature names in the order they appear in the calibrated
-  tensor. A name may appear more than once if the feature is
-  multi-dimensional (for instance a multi-dimensional embedding).
-* list of projection ops, that must be applied at each step (or every so
-  many steps) to project the model to a feasible space: used for bounding
-  the outputs or for imposing monotonicity. Empty if none are requested.
-* None or tensor with regularization loss.
-
-
-#### Raises:

-* `ValueError`: if dtypes are incompatible.
-
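
[Editor's note] A sketch assuming the signature above. The `CalibratedHParams` constructor arguments and feature values are illustrative assumptions:

```python
import tensorflow as tf
import tensorflow_lattice as tfl

# Hypothetical hparams: one feature, 3 calibration keypoints.
hparams = tfl.CalibratedHParams(feature_names=['price'], num_keypoints=3)

kp_in = tf.constant([0.0, 1.0, 3.0])
kp_out = tf.constant([0.0, 0.5, 1.0])

calibrated, names, projections, reg = tfl.input_calibration_layer_from_hparams(
    columns_to_tensors={'price': tf.constant([[1.0], [2.5]])},
    feature_columns=None,
    hparams=hparams,
    keypoints_initializers={'price': (kp_in, kp_out)})
```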
diff --git a/g3doc/api_docs/python/tensorflow_lattice/lattice.md b/g3doc/api_docs/python/tensorflow_lattice/lattice.md
deleted file mode 100644
index 1ac115d..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/lattice.md
+++ /dev/null
@@ -1,36 +0,0 @@
-
-# tensorflow_lattice.lattice
-
-``` python
-lattice(
-    input_tensor,
-    parameter_tensor,
-    lattice_sizes,
-    interpolation_type='hypercube'
-)
-```
-
-Returns an interpolated look-up table (lattice) op.
-
-#### Args:
-
-* `input_tensor`: [batch_size, input_dim] tensor.
-* `parameter_tensor`: [output_dim, param_dim] tensor, where param_dim ==
-  lattice_sizes[0] * ... * lattice_sizes[input_dim - 1].
-* `lattice_sizes`: A list of lattice sizes of each dimension.
-* `interpolation_type`: 'hypercube' or 'simplex'.
-
-
-#### Returns:
-
-* `output_tensor`: [batch_size, num_outputs] tensor that contains the output
-  of the lattice.
-
-
-#### Raises:
-
-* `ValueError`: If interpolation_type is neither 'hypercube' nor 'simplex'.
-
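
[Editor's note] A sketch of direct lattice interpolation assuming the op above. A 2x2 lattice has 4 parameters per output; the parameter values are illustrative:

```python
import tensorflow as tf
import tensorflow_lattice as tfl

inputs = tf.constant([[0.2, 0.8]])            # [batch_size=1, input_dim=2]
params = tf.constant([[0.0, 1.0, 0.5, 1.5]])  # [output_dim=1, param_dim=2*2]

output = tfl.lattice(inputs, params, lattice_sizes=[2, 2],
                     interpolation_type='hypercube')

with tf.Session() as sess:
  print(sess.run(output))  # interpolated lattice value(s)
```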
diff --git a/g3doc/api_docs/python/tensorflow_lattice/lattice_layer.md b/g3doc/api_docs/python/tensorflow_lattice/lattice_layer.md
deleted file mode 100644
index d4824f4..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/lattice_layer.md
+++ /dev/null
@@ -1,69 +0,0 @@
-
-# tensorflow_lattice.lattice_layer
-
-``` python
-lattice_layer(
-    input_tensor,
-    lattice_sizes,
-    is_monotone=None,
-    output_dim=1,
-    interpolation_type='hypercube',
-    lattice_initializer=None,
-    l1_reg=None,
-    l2_reg=None,
-    l1_torsion_reg=None,
-    l2_torsion_reg=None,
-    l1_laplacian_reg=None,
-    l2_laplacian_reg=None
-)
-```
-
-Creates a lattice layer.
-
-Returns the lattice output, the lattice parameters, and projection ops.
-
-#### Args:
-
-* `input_tensor`: [batch_size, input_dim] tensor.
-* `lattice_sizes`: A list of lattice sizes of each dimension.
-* `is_monotone`: A list of input_dim booleans, a boolean, or None. If None or
-  False, the lattice will not have monotonicity constraints. If
-  is_monotone[k] == True, then the lattice output is non-decreasing
-  with respect to input_tensor[?, k] (the kth coordinate). If
-  True, all input coordinates get the non-decreasing monotonicity.
-* `output_dim`: Number of outputs.
-* `interpolation_type`: 'hypercube' or 'simplex'.
-* `lattice_initializer`: (Optional) Initializer for the lattice parameter
-  vectors, a 2D tensor [output_dim, parameter_dim] (where parameter_dim ==
-  lattice_sizes[0] * ... * lattice_sizes[input_dim - 1]). If None, the
-  lattice_param_as_linear initializer will be used with
-  linear_weights=[1 if monotone else 0 for monotone in is_monotone].
-* `l1_reg`: (float) l1 regularization amount.
-* `l2_reg`: (float) l2 regularization amount.
-* `l1_torsion_reg`: (float) l1 torsion regularization amount.
-* `l2_torsion_reg`: (float) l2 torsion regularization amount.
-* `l1_laplacian_reg`: (list of floats or float) list of L1 Laplacian
-  regularization amounts, one per dimension. If a single float value is
-  provided, then all dimensions get the same value.
-* `l2_laplacian_reg`: (list of floats or float) list of L2 Laplacian
-  regularization amounts, one per dimension. If a single float value is
-  provided, then all dimensions get the same value.
-
-
-#### Returns:
-
-A tuple of:
-* output tensor of shape [batch_size, output_dim]
-* parameter tensor of shape [output_dim, parameter_dim]
-* None or projection ops, that must be applied at each
-  step (or every so many steps) to project the model to a feasible space:
-  used for bounding the outputs or for imposing monotonicity.
-* None or a regularization loss, if regularization is configured.
-
-
-#### Raises:
-
-* `ValueError`: for invalid parameters.
\ No newline at end of file
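
[Editor's note] A minimal sketch assuming the signature above; the shapes and regularization amounts are illustrative:

```python
import tensorflow as tf
import tensorflow_lattice as tfl

inputs = tf.placeholder(tf.float32, shape=(None, 2))

output, params, projections, reg = tfl.lattice_layer(
    inputs,
    lattice_sizes=[2, 2],
    is_monotone=[True, True],  # non-decreasing in both inputs
    output_dim=1,
    l2_torsion_reg=1e-4)

# If `projections` is not None, run it after each optimizer step to
# keep the parameters in the feasible (monotone) space.
```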
diff --git a/g3doc/api_docs/python/tensorflow_lattice/lattice_regularization.md b/g3doc/api_docs/python/tensorflow_lattice/lattice_regularization.md
deleted file mode 100644
index b52ee03..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/lattice_regularization.md
+++ /dev/null
@@ -1,49 +0,0 @@
-
-# tensorflow_lattice.lattice_regularization
-
-``` python
-lattice_regularization(
-    lattice_params,
-    lattice_sizes,
-    l1_reg=None,
-    l2_reg=None,
-    l1_torsion_reg=None,
-    l2_torsion_reg=None,
-    l1_laplacian_reg=None,
-    l2_laplacian_reg=None,
-    name='lattice_regularization'
-)
-```
-
-Returns a lattice regularization op.
-
-#### Args:
-
-* `lattice_params`: (Rank-2 tensor with shape [output_dim, param_dim])
-  Lattice parameter tensor.
-* `lattice_sizes`: (list of integers) lattice size of each dimension.
-* `l1_reg`: (float) l1 regularization amount.
-* `l2_reg`: (float) l2 regularization amount.
-* `l1_torsion_reg`: (float) l1 torsion regularization amount.
-* `l2_torsion_reg`: (float) l2 torsion regularization amount.
-* `l1_laplacian_reg`: (list of floats or float) list of L1 Laplacian
-  regularization amounts, one per dimension. If a single float value is
-  provided, then all dimensions get the same value.
-* `l2_laplacian_reg`: (list of floats or float) list of L2 Laplacian
-  regularization amounts, one per dimension. If a single float value is
-  provided, then all dimensions get the same value.
-* `name`: name scope of the lattice regularization.
-
-
-#### Returns:
-
-Rank-0 tensor (scalar) that contains the lattice regularization.
-
-
-#### Raises:
-
-* `ValueError`: if lattice_params is not a rank-2 tensor, or if output_dim
-  or param_dim is unknown.
\ No newline at end of file
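
[Editor's note] A short sketch assuming the signature above; the parameter tensor and amounts are illustrative:

```python
import tensorflow as tf
import tensorflow_lattice as tfl

# [output_dim=1, param_dim=4] parameters of a 2x2 lattice.
lattice_params = tf.Variable([[0.0, 1.0, 0.5, 1.5]])

# Scalar tensor to add to the training loss.
reg = tfl.lattice_regularization(
    lattice_params, lattice_sizes=[2, 2],
    l2_torsion_reg=1e-4, l2_laplacian_reg=[1e-3, 1e-3])
```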
diff --git a/g3doc/api_docs/python/tensorflow_lattice/load_keypoints_from_quantiles.md b/g3doc/api_docs/python/tensorflow_lattice/load_keypoints_from_quantiles.md
deleted file mode 100644
index b6ace1f..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/load_keypoints_from_quantiles.md
+++ /dev/null
@@ -1,57 +0,0 @@
-
-# tensorflow_lattice.load_keypoints_from_quantiles
-
-``` python
-load_keypoints_from_quantiles(
-    feature_names,
-    save_dir,
-    num_keypoints,
-    output_min,
-    output_max,
-    dtype=dtypes.float32
-)
-```
-
-Retrieves keypoints initialization values for selected features.
-
-It expects that the quantiles have already been calculated and saved in
-save_dir by the save_quantiles_for_keypoints function. It will raise
-an I/O error if not.
-
-#### Args:
-
-* `feature_names`: List of feature names for which to get keypoints
-  initialization values.
-* `save_dir`: Directory where the quantiles have been saved to. Same value
-  used when save_quantiles_for_keypoints was called.
-* `num_keypoints`: Desired number of keypoints to use for calibration. This
-  can either be a scalar to be used for all features, or a dict mapping
-  feature name to num_keypoints. Fewer keypoints than requested can end
-  up being used when for the given feature there are not enough different
-  values. If num_keypoints for a feature is missing, None or 0, no
-  initialization is generated.
-* `output_min`: Initial calibrated value associated with the first calibration
-  keypoint. The keypoint outputs in between will be linearly interpolated.
-  It can be given as a scalar, in which case the value is used for all
-  features, or a dict mapping feature name to output_min.
-* `output_max`: Like output_min, but the calibrated value associated with the
-  last keypoint. Scalar or dict.
-* `dtype`: Type to be used for calibration.
-
-
-#### Returns:
-
-Dict of feature name to pair of constant tensors that can be used to
-initialize calibrators' keypoint inputs and outputs.
-
-
-#### Raises:
-
-* `tf.errors.NotFoundError`: if the quantiles file is not found.
\ No newline at end of file
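
[Editor's note] A sketch assuming quantiles were saved earlier with save_quantiles_for_keypoints; the feature names and paths are illustrative:

```python
import tensorflow_lattice as tfl

keypoints = tfl.load_keypoints_from_quantiles(
    feature_names=['price', 'distance'],
    save_dir='/tmp/quantiles',  # same dir passed to save_quantiles_for_keypoints
    num_keypoints=10,
    output_min=0.0,
    output_max=1.0)

# keypoints['price'] is a (inputs, outputs) pair of constant tensors,
# suitable for keypoints_initializers in the calibration layers.
```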
diff --git a/g3doc/api_docs/python/tensorflow_lattice/monotone_lattice.md b/g3doc/api_docs/python/tensorflow_lattice/monotone_lattice.md
deleted file mode 100644
index e666518..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/monotone_lattice.md
+++ /dev/null
@@ -1,58 +0,0 @@
-
-# tensorflow_lattice.monotone_lattice
-
-``` python
-monotone_lattice(
-    lattice_params,
-    is_monotone=[],
-    lattice_sizes=[],
-    tolerance=1e-07,
-    max_iter=1000,
-    name=None
-)
-```
-
-Returns lattice parameters projected onto the monotonicity constraints.
-
-Monotonicity constraints are specified by is_monotone. If
-is_monotone[k] == True, then the kth input has non-decreasing monotonicity;
-otherwise there is no constraint.
-
-This operator uses an iterative algorithm, the Alternating Direction Method
-of Multipliers (ADMM), to find the projection, so tolerance and max_iter can
-be used to control the accuracy vs. time spent trade-off in the ADMM method.
-
-Inputs
-  lattice_params: 2D tensor, `[number of outputs, number of parameters]`
-
-Params
-  is_monotone: 1D bool tensor that contains whether the kth dimension should
-    be monotonic.
-  lattice_sizes: 1D int tensor that contains a lattice size per dimension,
-    [m_0, ..., m_{d - 1}].
-  tolerance: The tolerance in ||true projection - projection|| in the ADMM
-    method.
-  max_iter: Maximum number of iterations in the ADMM method.
-
-Outputs
-  projected_lattice_params: 2D tensor,
-    `[number of outputs, number of parameters]`, that contains the projected
-    parameters.
-
-#### Args:
-
-* `lattice_params`: A `Tensor`. Must be one of the following types: `float32`, `float64`.
-* `is_monotone`: An optional list of `bools`. Defaults to `[]`.
-* `lattice_sizes`: An optional list of `ints`. Defaults to `[]`.
-* `tolerance`: An optional `float`. Defaults to `1e-07`.
-* `max_iter`: An optional `int`. Defaults to `1000`.
-* `name`: A name for the operation (optional).
-
-
-#### Returns:
-
-A `Tensor`. Has the same type as `lattice_params`.
\ No newline at end of file
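
[Editor's note] A sketch of projecting lattice parameters onto monotonicity constraints, assuming the op above; the parameter values are illustrative:

```python
import tensorflow as tf
import tensorflow_lattice as tfl

# [outputs=1, params=4] parameters of a 2x2 lattice; 0.9 -> 0.4 violates
# monotonicity in one direction.
params = tf.constant([[0.0, 1.0, 0.9, 0.4]])

projected = tfl.monotone_lattice(
    params, is_monotone=[True, True], lattice_sizes=[2, 2])

with tf.Session() as sess:
  print(sess.run(projected))  # parameters adjusted to be non-decreasing
```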
diff --git a/g3doc/api_docs/python/tensorflow_lattice/monotonic_projection.md b/g3doc/api_docs/python/tensorflow_lattice/monotonic_projection.md
deleted file mode 100644
index 4d61848..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/monotonic_projection.md
+++ /dev/null
@@ -1,41 +0,0 @@
-
-
-# tensorflow_lattice.monotonic_projection
-
-``` python
-monotonic_projection(
-    values,
-    increasing,
-    name=None
-)
-```
-
-Returns a non-strict monotonic projection of the vector.
-
-The returned vector is of the same size as the input, with values changed as
-needed to make them monotonic, minimizing the sum of squared distances to the
-original values.
-
-This is part of the set of ops that support monotonicity in piecewise-linear
-calibration.
-
-Note that the gradient is undefined for this function.
-
-  values: `Tensor` with values to be made monotonic.
-  increasing: Defines whether the projection is to monotonically increasing
-    values or to monotonically decreasing ones.
-
-  monotonic: output `Tensor` with values made monotonic.
-
-#### Args:
-
-* `values`: A `Tensor`. Must be one of the following types: `float32`, `float64`.
-* `increasing`: A `Tensor` of type `bool`.
-* `name`: A name for the operation (optional).
-
-
-#### Returns:
-
-A `Tensor`. Has the same type as `values`.
\ No newline at end of file
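A minimal sketch of the projection on a small vector, assuming the TF 1.x runtime; the L2 projection pools adjacent violators, so the two out-of-order values average out:

``` python
# A sketch, not from the original docs: project [0, 2, 1, 3] onto increasing
# order; the violating pair (2, 1) is averaged to (1.5, 1.5).
import tensorflow as tf
import tensorflow_lattice as tfl

values = tf.constant([0.0, 2.0, 1.0, 3.0], dtype=tf.float32)
monotonic = tfl.monotonic_projection(values, increasing=tf.constant(True))
with tf.Session() as sess:  # TF 1.x graph mode
  print(sess.run(monotonic))  # [0.0, 1.5, 1.5, 3.0]
```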
diff --git a/g3doc/api_docs/python/tensorflow_lattice/pwl_indexing_calibrator.md b/g3doc/api_docs/python/tensorflow_lattice/pwl_indexing_calibrator.md
deleted file mode 100644
index a6534c0..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/pwl_indexing_calibrator.md
+++ /dev/null
@@ -1,42 +0,0 @@
-
-
-# tensorflow_lattice.pwl_indexing_calibrator
-
-``` python
-pwl_indexing_calibrator(
-    input,
-    kp_inputs,
-    name=None
-)
-```
-
-Returns a tensor representing interpolation weights in a piecewise linear
-function. If using a large number of keypoints, try
-PwlIndexingCalibratorSparse.
-
-Notice that in this version the keypoints inputs (given by kp_inputs) are kept
-fixed by forcing their gradient to be always 0. FutureWork: allow kp_inputs to
-also be optimized, by providing a gradient.
-
-Inputs
-  input: uncalibrated weights, `[batch_size]`
-  kp_inputs: keypoints' input weights, can be initialized with the
-    pwl_calibrator_initialize_input_keypoints op. `[num_keypoints]`
-
-Outputs
-  weights: Interpolation weights for a piecewise linear function. Its shape is
-    `[batch_size, num_keypoints]`. The dot product of this and the keypoints'
-    outputs will give the calibrated value.
-
-#### Args:
-
-* `input`: A `Tensor`. Must be one of the following types: `float32`, `float64`.
-* `kp_inputs`: A `Tensor`. Must have the same type as `input`.
-* `name`: A name for the operation (optional).
-
-
-#### Returns:
-
-A `Tensor`. Has the same type as `input`.
\ No newline at end of file
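A minimal sketch of the weight/dot-product pattern described above, assuming the TF 1.x runtime; keypoint inputs and outputs here are made-up values for illustration:

``` python
# A sketch, not from the original docs: interpolation weights for a batch of
# two inputs against three fixed keypoints, then the calibrated values.
import tensorflow as tf
import tensorflow_lattice as tfl

kp_inputs = tf.constant([0.0, 1.0, 2.0], dtype=tf.float32)   # keypoint inputs
kp_outputs = tf.constant([0.0, 0.8, 1.0], dtype=tf.float32)  # keypoint outputs
x = tf.constant([0.5, 1.5], dtype=tf.float32)                # uncalibrated batch

weights = tfl.pwl_indexing_calibrator(x, kp_inputs)       # shape [2, 3]
calibrated = tf.tensordot(weights, kp_outputs, axes=1)    # shape [2]
with tf.Session() as sess:  # TF 1.x graph mode
  print(sess.run(calibrated))  # [0.4, 0.9]
```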
diff --git a/g3doc/api_docs/python/tensorflow_lattice/save_quantiles_for_keypoints.md b/g3doc/api_docs/python/tensorflow_lattice/save_quantiles_for_keypoints.md
deleted file mode 100644
index 338e1f7..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/save_quantiles_for_keypoints.md
+++ /dev/null
@@ -1,78 +0,0 @@
-
-
-# tensorflow_lattice.save_quantiles_for_keypoints
-
-``` python
-save_quantiles_for_keypoints(
-    input_fn,
-    save_dir,
-    feature_columns=None,
-    num_steps=1,
-    override=True,
-    num_quantiles=1000,
-    dtype=dtypes.float32
-)
-```
-
-Calculates and saves quantiles for given features.
-
-These values can later be retrieved and used by
-load_keypoints_from_quantiles().
-
-Repeated values are discarded before the quantiles are calculated, so the
-quantiles of a very skewed distribution (for instance, one where 99% of the
-values are 0) will differ from the true quantiles. But for the purpose of
-calibration this approach is more useful.
-
-Nothing is returned; the values are simply saved in the given location.
-
-This function can be called as a preprocessing step before actual training
-starts. Typically one runs it locally in a separate process, for instance
-before starting training.
-
-#### Args:
-
-* `input_fn`: Similar to the input_fn provided to Estimators. Typically one
-  doesn't need to go over the full data to get good quantiles: some 100
-  random examples per quantile are usually good enough for the purpose of
-  calibration. If you don't have much data, just use everything.
-  If input_fn returns a target (used in training) it is ignored.
-* `save_dir`: Where to save these quantiles. Since hyper-parameter
-  optimization trains various models, the quantiles information generated
-  here can be shared among them, so this should be a directory that can be
-  accessed by all training sessions. A subdirectory called "quantiles" will
-  be created, and inside it one file per feature is created: named after the
-  feature name, with the quantiles stored in JSON format.
-* `feature_columns`: If set, quantiles are generated for these feature
-  columns. The file name used to save the quantiles uses a hash of the names
-  of the feature_columns, so it can support different quantile sets for
-  different parts of the model if needed. If not set, quantiles will be
-  generated for all features returned by input_fn.
-* `num_steps`: Number of steps to take over input_fn to gather enough data to
-  create quantiles. Set to 0 or None to run until the queue is exhausted,
-  as when you use num_epochs in your input_fn.
-* `override`: If False, quantiles won't be regenerated for files that are
-  already there. This works as long as the feature definitions/distributions
-  haven't changed from one run to another.
-* `num_quantiles`: This value should be larger than the maximum number of
-  keypoints that will be considered for calibrating these features. If
-  there are not enough quantiles for the keypoints, the system is robust and
-  will simply interpolate the missing quantiles. Similarly, if there are not
-  enough examples to represent the quantiles, it will interpolate the
-  quantiles from the examples given.
-* `dtype`: Default dtype to use, in particular for categorical values.
-
-Returns: Nothing, results are saved to disk.
-
-
-#### Raises:
-
-* `errors.OpError`: For I/O errors.
-
-FutureWork:
-  * Use the Munro-Paterson algorithm to calculate quantiles in a streaming
-    fashion. See the Squawd library.
-  * Add support for weighted examples.
-  * Handle cases where there are not enough different values in quantiles.
\ No newline at end of file
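A minimal sketch of the preprocessing step described above, assuming the TF 1.x runtime; the input_fn, feature name, and save directory are all hypothetical:

``` python
# A sketch, not from the original docs: compute and save quantiles once,
# before training, so later runs can share them.
import tensorflow as tf
import tensorflow_lattice as tfl

def input_fn():
  # Hypothetical toy input_fn returning a dict of features; any labels
  # returned here would be ignored by save_quantiles_for_keypoints.
  features = {"age": tf.random.uniform([1000], minval=20, maxval=80)}
  return features, None

tfl.save_quantiles_for_keypoints(input_fn=input_fn,
                                 save_dir="/tmp/quantiles",
                                 num_steps=1)
# Later runs (e.g. hyper-parameter sweeps) can retrieve these via
# load_keypoints_from_quantiles with the same save_dir.
```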
diff --git a/g3doc/api_docs/python/tensorflow_lattice/uniform_keypoints_for_signal.md b/g3doc/api_docs/python/tensorflow_lattice/uniform_keypoints_for_signal.md
deleted file mode 100644
index 1f6dfd8..0000000
--- a/g3doc/api_docs/python/tensorflow_lattice/uniform_keypoints_for_signal.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-
-# tensorflow_lattice.uniform_keypoints_for_signal
-
-``` python
-uniform_keypoints_for_signal(
-    num_keypoints,
-    input_min,
-    input_max,
-    output_min,
-    output_max,
-    dtype=dtypes.float32
-)
-```
-
-Returns a pair of initialization tensors for calibration keypoints.
-
-This is used when the input range to be calibrated is known.
-
-#### Args:
-
-* `num_keypoints`: number of keypoints to use for calibrating this signal.
-* `input_min`: Scalar with the minimum value that the uncalibrated input
-  can take.
-* `input_max`: Scalar with the maximum value that the uncalibrated input
-  can take.
-* `output_min`: Scalar with the calibrated value associated with input_min.
-  Typically the minimum expected calibrated value, but not necessarily,
-  especially if the calibration is decreasing.
-* `output_max`: Scalar with the calibrated value associated with input_max.
-* `dtype`: If any of the scalars are not given as tensors, they are converted
-  to tensors with this dtype.
-
-
-#### Returns:
-
-Two tensors to be used as the keypoints_inputs and keypoints_outputs
-initialization, uniformly distributed over the given ranges. Dtype is given
-by input_min, input_max, output_min, output_max.
-
-
-#### Raises:
-
-* `ValueError`: if underlying types (dtype) don't match.
\ No newline at end of file
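A minimal sketch of uniform keypoint initialization, assuming the TF 1.x runtime; the input range [0, 100] and output range [0, 1] are made-up values for illustration:

``` python
# A sketch, not from the original docs: five uniformly spaced keypoints for an
# input known to lie in [0, 100], calibrated into [0, 1].
import tensorflow as tf
import tensorflow_lattice as tfl

kp_inputs, kp_outputs = tfl.uniform_keypoints_for_signal(
    num_keypoints=5,
    input_min=0.0,
    input_max=100.0,
    output_min=0.0,
    output_max=1.0)
with tf.Session() as sess:  # TF 1.x graph mode
  print(sess.run([kp_inputs, kp_outputs]))
  # expected: [0., 25., 50., 75., 100.] and [0., 0.25, 0.5, 0.75, 1.]
```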
diff --git a/g3doc/images/data_dist.png b/g3doc/images/data_dist.png
deleted file mode 100644
index 3e711898a596cfc9f87473741c89d7ace50f8068..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 654474
[654474 bytes of base85-encoded PNG data omitted]
z3I>Q98_-0HAa><(-kYUwss%y9%zri8uhUg#gfxGKf|)_i8cpKj-j5$Y+N9_4J1EHrp3mCLin6cr zT!<3uOAw*9ZiqJ))&7{IbH7hdBl4$TKfPDHroHO3&idPrYhuTQjn3J87UhlfTnVXi zs~#07Z(7fMd;C~m)-OX1F*rJEFt7*a^Q_m{`i?;08Oze>x zC(ug;qzpJYtJ#t?^tl?2cDKIegpoknR5V(AacaZFu*Kf%aNDq}=d%DZ7#HVDc`zIm>MpaZ^Z851a2{L#;i+#N zm-osjYNOu;wHv&gF1`b9B&UxwhERVv4sO(U2Z|&jq`i`^?x2AjUgh4SX@1i1bT+g$ zoi5+`i*gkLg*9kE*JJMSt9*`SYek*W>l2^Q+5?E{TC@lT1&!E~e44n)aORl>^Hyh=t1>tj)&h zJMC(q6iJFVtBa>wa3@TAA_Q|&2$sV`o+Cj0c(bMu@~dA-oV?f2rsf*Yxl3wlYhu=@ z(0KNz`KzlEWViymAhld!C70IRb{Y2^($p~9h4VtUStpGlV7P#41z!I#Y9|DVz$X=` zPP`NoeU(u&Z|7#rO?P?vnEO5V&Rd?Ce2{{QMG=1s59z*8R95Z)W#m2tJlkn!>bZsn z*;#h;qO|mj{~|py6+~?oV1aPG4*V}}&^}Mq*>4a;H+xc8>QYWLTOA%zgSfZ*N;>wS zYZ5{(%r=l__4O(515aWYSH}o(zcw+)WP6D!jwaGzb?f#o?F>gd`lqkk7t1T~6buX$ zA<-msfpy6BB3w6~c#xK-koz_M@fNUws8f@IDf`Vxq%H@ z5f{(!Q~Qnk0A@u}3wdc29G&#i*e-i3>V{Z`HvGP>)z!0+91)B{{Q7W9%#W+CIFcaM++IhuW3jj0%kV4k}R`iu&UnrY^={P)_nNrsgeN zj5ruZ(5$q)?`KRIAJk`8V@T?le@zvR4UD?i4im6%t*{^R93cs&y3&7Lp3Uq)cbozy zC}#rPCCz2G?8Q~{ojU@MN6ev<2I5r1`~L+bU+7|c4gxtme3N-h>~?=l8fbLUXeAK0 zR3rzTmP)ml+s-gBW03xyft)#E;l8>@U}n1Ix?Ne*+|(lzgqw*Hk1w_1(_SgEeA4-c z`G7={JG62sdT1e0^SRZUD=Cx{`!vZDd-2$psb)%hLV~8?fLKl+JE0UwZU268kQ=aT zPIuwzHyrvJP9+Vb$kmw+Yo~_v8$PkHUk>_Ov?-d{<~5R11+CR~D-0F#?AKO1yrM_k zkQ8IC?_+p4>Vi?(UlxE8Yd>QQ$qQO&a`|-t^CC^c>NQ>?9w!765NxE;xFNQ$f3`C{ z3t-sSCj}DYAsF~%JpX&o#o7%C64y((*PtmxY)NHdj$K`NbcnE$nZ@S`A5IL?&X&)W zfTZ(DN9@qn8fPJMqA-cO5~OTCH-^Zn0ja2wfG#kpNk^`oI&DE5%V)ZdO@Lu~gNBm8 z;<-6f@&-S&zuzFU32y}hs^QN=Ye6%8fYB1k+JwzIexDQqPo=Q8xZ<3Dg%lT*ExI#A z6Fl=2v)AwKDZsChMD6eUmfVxl$MX~KHpcd}mGWMG=3*7Y+RI%h|FLWJzVTnjgekGN z6I)=m<*mS62k4|!ueH$!shgOGlK`$_$u|TwIn^f^72uidFitoE4eZx)+1p<`#__1lh}+S87%&c65SQ6J0Vs6u(@*+ z^q-i5LtLsKkTwR&)kUd{RaalUBSq#IqcSa!H_rNw8$>)IB5jl{BlDpCPnO_=0(U*t z&d>!}VEVjCv>osPRCWzPW&r%E)`!h_taNsw|G8ZvzicVJK$&`IbhI6?Al#B<4jOIT zANN0nWglm;L!&7$Jc8tib6%NGO<~kve*Bg7ZK$jile_^^fy3+APNP#rw{t1+?w79-voLB=zGcr1b=dK&b0P%p83 z68p_Ztc}c!RI_W~AFxFdZ$YN`AxR%E773(UQZXZFydI|1w{Q_t-2_!}1Xv$(rvU*7c2UDK(OH^!0>9YK<|gi0_B@N>*Kqz+q_> z_F&KJa$LMit;ca7OALSzD{l~Y!iTZSmG_Q%IyoB%QX2HsD0a5}w({z1A627xA;hpC z05nC+A?95m()^GQRp>REwoxGw>|($E=AA?sLHyf-jEfg4U3-1hE4wDljl(dC3a{wC zAA^)8FbN#08nO85ujc+4cbPx0aH7BUQ48Mw! z%aJX-sHCW(#NeK`vc={Hme-2U?#Z}eYP+F7K!5Tx(d_)@246Y`>D5a)pJUgIoyxwS z`|g83$(Za1(}b*THI{j~(eDo^KNY}Hsrrf*m^Va=fm)@iJ~vnP!y^2(SWs6^>EK(^ zhPrG%tEhumYgYtqN2Wh{dyOVc(r0?>aINer`&;~b7h<>e8bP$u*4}QuN@`3vJEbxF?Ow42 zx(@NKMH^{LzjDI6po)x^Cua-QX5UIc4fq2;xtY^*D3E2gd4q%Y>f zCQ1m^fBlg*^-18w^IRbxb6!ZZ)XyGK$o6hVVl$)pJ>*qYoz8heh4g}6WxO~8M(?lJ zrD&>C3c0tphidgA@KCOK1<8E63IEo{(;PcIHE6uJoHg%P@FPidMcK}-Ef zGiQ6LyJh;a>6R_&-RaPoh9XF6kx$0M#CTp*^i$t^-+xrbx3i;Q-{+%r)3`so_jKEP z$I@5lZ7;P*8Z2<4hfosT7UlTtY#pc=1JG2CMa~=Iv^z8I^indUD2X955UiZG!^pl) z3z5`YT{Q&VYsutA+_&`30t1?xY-1!=rUfhS$-&{22CMF=v+eSx$#vRXGkf<{kk_9{ zo8vn#A)Av2S^d=4qyx<73n+%qF{7VhK3qwYJ2U#@_%!w&!H1704hdrGhYj|Lemt}R zwe@6?FJ`JO!xIxp!2I$ol_FKXKli@lEzdAov;DUo#n&+z83SOf#pYNcAwXG{j=K~a zhhPLv;mNc=JNTjVbw^doaXa+m2xZ@1v)J!-R7>HuJYq8Lxf}5LgdoXU-!ouB%|8Kt`&Xa6`Y+UlFFw+D0imC!ulh>o(?x1mz^95Rc)0p8! 
z=RhrF@YmId!aF6Ssm02sEkG*UR2DCKzYq z9H}X3dD)=R!^51pB3y}40398&y}d{RzJxv9AQfB1ZHvMds(2`5{dIha;lwg^m|Xfj z0l9B|Uj|HN$)*m;W=lBF$t05cO}5RJTO_p<*k4@=zvXKH2T-rGd-Cw%L+KYUCjMUc zr~kX|O8`pk85l4=*&J4fBw$2~cwGyE989yz)f^cg>0F?hJR0Sj=OSekQIvlyrEFTreA zKV`~>8Z#{Hb!5LbXa0j(Sr0fD+LlIa>pxaws82c`y|Dlq(8g!PYT$oZfTF-}AAvZk zI_^xx)Yb|D9#!f6`JWHb(Q8j0(*}Hf@QNIhR+%F0T+*!iq~x!Fd`A*G??+D0N2BX5=LYs8cgeuy^kRdUxc* zH&Kn)IByMw0%g($SXxGxqcVRi$1gm}V>b7VQFF9^N3`0a>)0A3COgBc_MZ+sD;SM@=!7ty0fJ(Kn_b(A=0`uh3|OiYtb z%#zBOc|_nz>g~^9g@V;u>-02vL#pjM^@g5!9ro zdZ$iWz^Esw;g0^jA3oHksYm9AvE{4vV_>85w&)FUHmK~wJ+)b0TR2rIgy|=C2p0_0l0-MOLB>xEKY>(W+sWA2HSjbO4%xF8jYmGwkL6jAmHx+A>IAF^Y+$ zgZ48mJMYwfL|dy1csrrt;VUt+)U=|als>t+xhXj37TybKZ3gdo?7Nwn+e|s}9uYkI zRKuj|%oCJzKcYZBEB-bct-Lqz@Z@1cfkK=_%`iG6Vai+(d_Y#Voa|1>lX| zbrdUU!NbB4Nom)CZoJpDa6(JI49wjiS=Q$tX3rP5{6cqd97hdwydr#db`~)$2}~fN z<>D%@ZpW9@lF9ww>8b-J5=;{I4n0mzyVY(C6<${tmrtKQwEzbuN!(KyP>*GU0*8y6 z8?YSjq2f7VsC|K#EZDju>6J=RgC(mFJ`Q-|KK9``%qH`(F!ya-<41Qgbi#B=Emhr88h)T zf4U~1$Zndi3d1URo<(CEtLSzCv=S9Q4UGa24cDvPnyuM79>&;Qw*b6E1)Wz2SACzxI)XPblIDjR_xjZ-NsB}1wI4~{z0v`9Wp_L%iEEuTi4r6 zKp~ygHZ&Y}5GrtjA#jMQ{9uNdxd;O=#n1jT#W|8kM@Q;arug^n-8TkGzkPDI@_sRE@8D~qJY&v(Y> z6&Nc;bo#~JtWVbrov7>I^On-QPW|e3q0UyZn2skorl{TElm{}F9=Dep9bH{e&X)r3 zX*inisbg>$% z@Qv+7!)Q-Wu_#WSQH-wE>yT0s?E5zd#}#%xF9#XI#`mf10)wF2^CVI_>?H?(jM$zQ zX9#|%?j)Nd1)Er0x`ba8As%T_)+Z$)l%%&WgatmnEDF+3S}Pg=LSK;FZ;Tk}NA0?b z55lk)z`p)H>__tRHxCj1Ysi~lfKwBU?R(764`x9IyjJ04$$Kd;Z`>L7D5CCE75Efk z!={EqSaBxa5K^LXW>&&OV^eP5ZjNd#W=r!fw9cU;5jbI zy+M83K0%)isq=_u2+J;(gs(e%O4gMIgjdh!GpeF3pIMmtC>H@m@+b!+91%7K^cxHT zosRKcP^PBttMiIs5)|6LT;yF)sns zCnP$$0C@ZH)R~_@x3vXgZY?xRl$V!}6U4T!(KVDMAv28#+0ap?w!gZ0CSf3n{P3jO zfcqDGHq548Yn2$xiHwbI-^zLQQ?H&kieYx4<^?ee5x6gVIYvTTTU%g*KqVNkBbog* z0)9OWW{Ar|w^an)(sVpLv9>EP{c`oEVqU;xt3!yvct(e5FjMV0P@^kkLf3XDL`?Qa zhgC_T53zH-d}uO=onJYre(GOhU~LW=6}qfV#saT$l%3J z3V>GLE`(Y}xW|wF&pq~g|NgzpR$d~hm>VzKhpMk97szNhV44%es(y=+oZrGlqg+s% zo%Kq48!n$l^ItyZcN7Z+D%Hp*zhulaZ9lK+btwT3jUe+Cv5FgefK%EteJHvvoi z01eH)m#s!imAj&2eNo+*-90LESy^6uo|>3E7)?>41-Gc7;bi{%V{J8}xGBNpcv*RU zE^Pn%A=boFa_WzX1!Di+`O~2aqc_4%JKcZ-xu?7P>#hy(=b1d+S4WmM{RvZ$xT$)*l5J7mFbm#;v7tS80n(S}^mHwW?aSA zdNlOewV8kct(v``Rxdvqf;S^0ySTkYJE@3HTS7-19@y%ShTs@++Mj>x;^OjyO_DXB z*6HP|puf6AFuHvSG4wJYFo9u_ap%6aN6fJV4yz3)xT_WVjW0RGq{MfX#KltfcX#$0 z3SInvaS&yF38J|gm^l|ffbfmC3$!#1rtq1AdM?K|hwHZYBC;dgLJ9F`&%o1Z zyJoPGhIbI5;8EN+R{&TXWv}B9H{6pkb|1YS? 
zuZQos?GDR7m=fblwm+6A{>)iIlt&bp$1lIJ8mp}?_wyNzTUAK5qD$G|yCok25?$}S zKY$q2`S&@|2h_H`%=@RS1&eBHlK|zR5*Um6Rq@Ng$w~2;OCHgJ_njDc+eRR6jwd&F z78Xj0d8_&yUtSv@fAdXkrh-O=Iwf3mr*FQ)Ddp`j!fge$c`-0^jM_ORJsP=4 z8BW;)@Fws3x3&OUzTVB@FB(5SK0-#t`nvlG$QB+V!a(TW%ClW~7nqgT=W`D^Bs8?p zfn%AFNxlp;@q=AUX=!N%!miyS3{sI3bpqZ@9#*T{2qbrNusvy({`TeRm#7(wn!YluinePTk&;FvBt;OAM!FlMK?LdU?uIR`q<~0ww{&+mNOyO4!*}ibeZIf^ z;Fvvg&6>5&T3;%E2B(2i;#vHM=JF3NxaFIBqvh>gojGDmn)S3fm3njX8oXor&b|OcfN&GZk=; z@?U4SE)MyZP6|i6lZnFLudiB{9R3AAW&$?4uhSrcuNw;60t4S$PFvmodxigh2b=c$ zJs7Bfq|6?4FWAJyTDx4JsHm13z5;o7mFAPke(CxHCbe(T78aW5>q|1T;Kiz#@L5q4 zUT|az6@-B5On`@nCsC^11pRRCOqY@xS9t!S_OE6A-~B{7J3qe#!FoVxt_yNeYU}Eh z%^OShp-B|cxpK_l{5?EA(y}cr8p0Xcv6Gb;tEs6`P*U1I+?+yZJRwAMA~4!aIpcr( zkU7>1wfoiDt*e432PVAI|DGI-BtZzb{6}I5vsYd0h?E>Iyu5VyMlbNd)67tZxz>2ZoE1ifM$^u zNB~h8bl4FI0vj+{^Zu+v zX~Z{^)Z|RBcH24j5aX4b+`2khg|JQtz1{B(d&>@UnCc_`zGiYY$Eg4MCq*mckrY{w zW0~rCCK;om%t%cQSV{gg@MUlWLps8H@R0<~785=yI55E=H}J3Pss3DzjZQwiMF8e> zN^r59Jr44oD-Vl_cPAv5w+BsYKh^7Dn~SufE(v+V{v%b-we@!H2ig(JP}!kCro%-H zSDU*DT28%#_g2r02eXMXcEqM1jjHd)j5?DSm~4@17F>zx^i0_wK(~GXfGp^^F=03D zcr;{rMNuz?xaPqsa38ln;r_(pKpc)7>VmsnZZ-5pG}G7Y10$IvzsBR6}cC*p9A<+LE1hysn-P-2?@!9%d!_f zY7E38E;hCY_*`}u8#n+5nD0XkZKxE9!638)>H|n;fll~fPMAZNL{bdW8kaPH;c&r# z?I3uei-ZP`fx-TGOjiB?@t?;{9j6ylWGb%b@XrTW z&H94F?}98Vt9ZiexI?CxYZ12zkSSzvVUy&{%#on8V{$t)1O_VbK>wl8 zaG-B9hxS&yP!K{rm41cdz$h(wCE5IVA!}^Kt2Unw9YHe?aBy%=fogNS(aiw?ok%sC zOD>ta3s~;}=7b9E)`-%f-=BiSS<4(y@IeAmx!pQ+F*CGqRfaD!N^xGl zA3Y-0P8`V@*+N>4i_IoEUq=X#^l1;a?_$?IUmC%@!NXg3|vQ|e&3NJ z=#SNt(`Lth9-|3XT(A;^Oe~^V2m= zRWFUwv_*0s*2s=TXthQ1M$$+5|8e&I!Tlg!i#qTho5sMPAwL88|}Tw zNs{UlbZiK>o1ajv^!JonyuC`EPg>)DI!Tp>mmNHAn@&~}^93yoCL4&`_cX1)ooM1J z=5aFIdq6Y9c_fJN8~pvoJLQqTMr%5z7nith{dTu#vB^&@`j|XTkM{Lz1eP?2j(T;N z$;19~vEgjlTK@b-=hFOvc@cTfk;1;FT@$;&9Nj z>yPQJ5C76Fc$?gvDgTXpPiacstS~t-(czX@%J#CFfxp{s&u}0LTpO=zAB+;<-+Mj84E7uEWYQcYMcn?{(}rFHuSj;PXQSxInR z_Sp7^LUbBOicg0|$6c7uC-Ky3pWZot<0HA<(!lk6WWJ^lH9#m=QtG?f85YpK_uZsb zIT=3-Xyi<1gz0&WQRc8tCgu3u_UPHtd3I)>;5WoA`EBF#0R{i;?~#>fkMJJS+E@f{ z6<#lcy8Awf;Rob$PNy4*@J-D|g2ob$75=X8&9P5zk8WV9R$im*@-GWMt)b0n%(}VD zEQc!}SDcT0MJ9TwAQcHV835*R7c{HI>Q%zyl~N-WPWviQDcQ~aQAbZtF2Z|gUnVO1 z2_JRA`~DCh@)e%9Xa9Wy5hefKVDOT2pPcNJz;Fhi%`~nkRTJN9&CNK6*67^73CaH8 zBJc^nMS5_XPG&ub^iR3fG4nzK`q~!yHl;vQM}qD7NgJyLK3wZT{`B%csCKV#x0o5<};EbI=xugDR3Lqals7QpQc$@e!ar57;v}faw&Z-ce-vuTE?e= zHC@P7oG{UII31SY^L=M(!G6S4A2ls?sT~nzI$FcDhl4J(HSKdkm4z_#%2+)j2Jbnh z$bBV1nsq-Rg6`{(_0#tZn1(`_>G+O2e}7E;iC@N6iN8I(Dykt}gJKQ&%UK^@+TaWQ z*tSvbB|%$rTiGHJ6=zR3lR<>#4c#0j>paGR`KhtG*>}>J8B9f@{ICt5{9&2KzWf6j zV+lhcfa#cXV9qUDpYwq^hk0pxUaq5NKVx~%=n z-=J-l)C3?Nz;xCDpm;emGp^-w(4dbSk~N;YzJ8u=`De7}VrkFC>HaSAHq|m`T;P87 zMV58K&~{?PxZ~hCs^tNSW}`bBj}hLyODUPOP@!C;Yur;@E(aFza)fo6PC5C<=rwHG zTW<*vRgRE4duL%R@LL>Bc;xNw(Qe4>I3B0Zi&5(RV-Lq^`zcZ<{X9IO&#IVz1c?5v zX+$h(x&PbRPo0hTPEZ?gIUSTGw&T!x*!|^TPJ7HKfaH&bJAOjRF*JQc`wiJ7a z+POL*&u>|8^k$9u-=#GCNYN2wT7BUi2O%yoQ|0wE)%`BQZ8EaSL9i*HOnh*2M=bYo zXJuVz+#T|HRU`9un%0KpPW1&DR#@Kght$X){ajiwxv}kYlbBfPrUYoo^|ay(8Do`h zo3!Wtc^2%Y%8l3T8#tQo%inS*J$W;X0n#WbsR!K4sHFT!ARk28ECnoEDYKeqKdRd% z5f%|C{F)Di2!70&uoA=r>>f-ezT`s7Xyj%3?O?FEg(sRed2Ts*R2Is+ z5>XgipVup8E#(sFW={Iyd*7N*Lqq&|J8Rsl^J}s}4FePItPdtidtWs72C~K8nh|~c z`kjYz$oI6-fq7+@R8YZ?>6dSco&SNp2bWOFZJj0eZjjQ*Q(>dV)XEO0yTmuEtGCa^ zDgE`o6n2)R$>|OrkV1mrBu>$y9|chkrG3*ui{5>Bj6$ag4xx2!9PBW%!lfxRqD3n- zxt&L+qAmZ_W*+dTct9R~<}kR)qg8RZw6T0)k*u28)%_2oD9bN+fd~mTFfQ)w`eSd< z<2{WFaTJDcQxe9EwA9Kd8K)=Gl%@YjvjXe+x6lNnmw~2m2vY5$n(>-)3SUG$hX8Hu z336XtJv>N_&GrUU&nKkN*l* zU6z=T_)`kUa>j4x&(2nMsc>k$!FgMGlplz2>tXNeO^-q8-i5mti(2CTrK51=0C!^s 
z3#W?XcTfBE`&+G8b7wNq#h9d|LC`e!RE+RTH@RPenFq)YR6(rhdQSw^r%zuB0Mrb? zn4~e&jr`n{49N2n^i%^*@;{JnFs?7r&d^GCea{SEh4=$~MVt%e z)pjETHqNiwCnWYD6!~*f3&=o$^ni8aoBA?$>sC!ki9X&G@%-R!_hH%Oz&2eW7F8Ig zJv@!xW6xkiITI_Y7E`z-ZHOsFx|!)*e24+-+D-J)hw*ZhSqBQgD?P03&CMA5s92@% zO4sk0dcRmNHS@;qOeyM#lWU%&sMF3{e^Ru5{d8chew)hiTjHBh-$;M`j0q0Ix7nnU z5Jv;W--qi(fmvna%4Mz>AzexV`)N9Bwc7{Mvk!IbGk@T`U+Bg0702+T)o%SxA?nO( zQ69NWUbc005ms^TETx-ad}QhD1n62&fxgc62@2LivrgU@UCx4zM``3SZ*0 znqnRu9j(7hF8lvlfLIbBOxgqA+4pNU-cf%!Y_=SI^!v6`E8y}390^3!;Z$7FqZ<&7 zF=&4Ud2Mn#Z_Mv*-}aPx)KY(RPH%X#(vw!5zB%|+C?T++%2JWAOU0Y(35826xy=2M zU$NMBPRef166S(<6t?vI_3UO+$ZRm_FO-Ll<2_;kZToQ?&jrj^}fGF zA7@rMb9Hqe@r|9n#HRs%t+rO@n%DZbi{n5I_9@Ese)p{@{n6@iL^W57Ru8=Av(lxZ z7;jUWUdLGtJc23jlS}QsHWDiAIs6m*HQ0*zP9gG-^X|G+XdpAjH~wLSNId@KvTJk4 zqjV(HdfQKn9GEF7=wdiBf?A;wjH043Ekhl!F3X71m)jX$0S1H}keshawGHeY?Z@x7 z-zzUtzGFi5^-YuKz;|m950WEbF(xwT2y92ecmw;FNNOW5r#4bGGD)dH(b_QLY7R5zIFjdMtEw!ieQ-hzh*E`y9RnK0&cUJK z;=4T&x8n~$W7vmp`EBUr(6)86&#>(1#fV*|$TDkU}4okM#J=3h(Q{A6t56C8O(eDipipq6rv%x3vyykm(O zm*jcDh_F1G!gSp4C7iDZGo;W~uFgW-vluen!>G_}r(%`R*GQVkbX0%ks@7lfw~#ej z`CP#GaV(nmM)Yd{b~j9N{lo*QBnOLVkGW`%wr5MLAR0$aa7-)vA&qUqi2iFwRnMV+qrDpRAFd>f-s_8!qE!&ytwXH z<4CU}Ilg4r%ly&7XK&Q08l0?t3`m9Otuk41`osOcd}4^`g602HE+>ws^RG(A=jBoY zfyk`z zx0iLUjwW4>gLQcF#uiR=v<^G_rRi0j1Dxo;7I}eYjrzzTiHh6hLPqAwX;g7m41S)c z&T6(Iz64<`+1vR-7ff$wbHcY*ug|=PaI`7b)ihOog?)^(ge*;th~~n#1(v1y3%VLP zIDB#<(5Mb%0xF7`qKD>PY*#8n(8-UoO$JP50?ViHsfUd*zC@V(N{JS0V?=Siu7FAL z$`lYGX^IJe#LG#kzfXm5hA_})ni6gX3B6oYY;Q~cStQLXs#@U_dbo&w%<{^4aX0PV zVJH0h^ot-^TiqTv>yVdPt2Fd2xFte0^L6Qq^PY8lArptRx5v`*(U868o#i^NH^iGg zMgk|7xjLgOQ`@@}qu1?t3P0@QWJb&3E3M&)c|@-H31k!fX6asiQe>O@9uZfuGDaXE zx6MG{fUEi7M)0O+EGxae12q6gwblN?RkaEM{V?e5ajVPun@0q}I2b2c|w4LN_2`=L|7rmI86~%8@~!_LZC70I9L;7{q|qX_LEBjGY1vs z+dVB6xWYI6dgLrlc+%dQLrbCMS*K68HcZ!745+*KKC?q;(QSTLBHbs3Q?E##uVubO zZJ%z4zT_I=?fF~FB=>N}cihArpoJLDvK1Lo+hRiwi6BT1&9XPH1-?yd678YO^AWk zH67FTuhjTtt0?=X3uB8^e&MrB-u7&}c4ik!>JOyPyn_h=sV6nqgm%OlvKzetuC*Rx zPA4yU?M!GGpKV_3<9T;3TT(u;U3zni*GAXsqmMkAAk51ayPk|8?o9E*MkB1OT&yM+ z-Fh>^bMDV-EFpm(|F3FQbF4ttFRPm zk~SO9-ZH406AKh6eKA_qAih6^IS&U5OF>3i!eDK!d-~FkP{AkbRNBVeavpVGg{C+L#p?I8TGMVo3t=`_`4PzW z3b&B{Lz(gL!^G8VtYu|nP^DuTvcXIL#fq-8N=_@YyxKFv{puinjx|IDS79Zmoq$y_ zxuK^JGQ4Q#ED)P1Se$a|OR%`@TxBnx0Gh%D?WAmekF?aif6Xz-mlZVq=8iaN1gytt zcWA`%wtSyIrFi*!dUpLTkJ73kZ&m)W=s@aY zEQp|5DAVL~vf#kkNdGugH@Z+cf@JY)Mc%K9?cF219QEq` z41SF|5qgNX|K3df&$FkF8Ewd|An^bV|9!*E#bSfG=&|wk0||^%M@MO)a)W%n!^m0x zZ+FdaLD7tl(UB$7&UxR8bewnh_PH3U+?q<{QbuDkU&|-uE9Dl9Wl9uYTwEleC}@rW zQl?nF+EQ&^FKK<#M3bg-Jev;8%^$NJweo#EwGJ243te({3oKs7p7B=jb*_C`T-P-3 z9$--ZN?j`A1bQ+bcVGaRD6Uy5uy=4MW!6 zFA)ohHEO1uIfZ|y=X}hjnWDa=S*PjSbXB*mn=g8y=YR8{{-@wC9`FB853Td^?f|Qk7&Um4pysU zu_IVsRYbp|;}z;z*W!??b92Upf#I82hvbFFJkkyEy`Q1>Bjb#ms7iJS@nV~f^N!aa z-gn}0WWG?SC33cBQQ}JS{H+v!YbuW6Yd|&^Gz#rx}W5xVsjIlF)E%U5TRJ_MrQ zfvP!2K2^^41U{iR*kUe;%fS`M9%q_7T&#lZry>NXz14H5KQ>)7k1ugu$5d8+jf{Oo z>#g!|EjE(^hr9AKXysUmO^u=y9SSIyHl85^c?0XT4ulUuWb>I|?$9|9oiMO#F&RW= z=Jbc`{MTLg+d^en?t35Woe%qUp3d-!R4o4EPCiUqdLU;8kt!;BF`#FsNGI!CdBf2Ha=NZ` zKC7{+igG_fnBpn=K9tZ;qfOLW>rSRz%g=1ahneg6D;CDEo?@8)DxR6=4J%BZWq zhmV7TvbC0vhy*vLe|)U@`sML?l7kQE#PocGrw10rzG4GneQ&Z*QciAbE77LJo^c&) z>EORVU;(lnQOhDv>i>KbD6F!XKw>y!$1`2+BBD0fgf}%vVvWZH)Y6g)eye zshHF4A>knOGFL&$?%~D`XeV( z(s|uotOENgm4HCYe`MtA|6|QF#CPl0R`^w~=slUSuMUe2f0SrDHzWfhI~qvwPgXll zuUo#d6rK?acqf5))Tm#-1OX9T$R+h(FJP3AV+Ukek&%&Yfb+Kinjum8|0&_jevj7jhMQie_ ztLdxTrRg!}M6b?Cn2Qn2c-<^Bhxr`cy9Ez)6G-s1#9e zZ+@t{Vt&3MA@e_GI_5ptPYU2EW~({Y|9C`l|C{LlX_%^xL<{t8GTvW`j0X-u=j4bk(+QA1 z0KtOY)<~)j^!lF+bLP+mp}(NYVBEdyjPy-FKCWrZz!`2FB?W0^TKUyNTo&@rO 
zQ^VTkX>iZdBtAQAQHt&-v(F^wno`hh4g&z9mXwj{92zQ?I-fao1_w|Opg%yc(%R9H znLrLjs5@gQ!76KTX)F#~GJ)vCG#122c4+_itnI?oZ=2Ryl*n3k{@ph}hz|k1iK(W5 zYNm;fYQ5?!Due&goVP?o^Vu;QF(cJ9Jdnkg-zJP2&(W5 zPa^#1ze54D9fTo?2vR-Pv|U@a$%jAh_EnpxgTDB292NX-*Qe6v*RD-uv;S0XM^Frh zIFVdh5VK)q`(TK`9{b|?mqNS9!w8p&;CPz&0bKf2pYo!5GHx8R-n2zb2RlP2ftkl2iQ1ZF5Fne3CP_M1@AKyGA zruK4I#M(OGMjZ>@dL+x14QxR zm|W~T>oPIX)86nJTj=C=_ z*A;8gQKqQ@GL7^$iX#;gqQ%WhdWty`W+DG$8?5T+6K?vGyz6xV!`OUDQgY>TdSSI_ zhC|T3@Vqb-=#C{5&x<_Z`_}X7%wxg**|mFVKHd$x=ghblP4oKr>Gw7AUOAqF>pg*7 zT;f=r6U7w4-F|fW7WUKa0pzB7hnwH?lwY5BJpQ~*`eD6|{LS+|{FA*4(Gi=&4ssdZ zrSV*an$k!3{v?YTGybIsh4DFMl9COGk^E{6D? zz09i3Icdr^fsMKlE^oS)g=5*^{YN+227WDx0usRx@yio?9wiS;Tk&GCsl+8s6^Jjg@o%$`S?J$u~e z552BImbfGMcFzz^sfy@2hq z4g_&;6y^y3AlTg63XF)rczSvQ_c+h5NTu+BxS%6coE#9%Pyu_9YMC#CR{hGhiw{Qv zV5-~MQdkkP8*Kmlfl=7o@@dnJ@-GA0>D*Q9#b5@zk^R6h8T|4-3<<34t!I zA6SgVtk~bEzLJ|<+IHUuRJt)57ouitVdy9ibuZ)3Bt+D(F5wz z>a_UKw6}lZ+`fCSev{`YeHY08z)-(U*vNh871#s$p;4m|&vh#aoFupdMyVAcB80dV zUR@ZRNiKF?nc}LU1O&JQHHRI&4I1ZzL4~tShNOp?4J&rDJ%t1oO=d;&UOHiG?!pLpQ`(H=7bwzE*r=-Vu?tAR zW*XgC+Nz z&4SmgXqEJB4|Zy4&Uv@U#PL)#x@^foGjDj5C2kAL&nWLwTMM3BxM%aTJ!x(#CB`P5 z?Gl2TB{EX4uU-8(!Nl`#7ClTw#e8ZoTvba)EzD5U9Hw(vEw=i8dP(0pUt*PnaF5%} z?1*Uguyfi^ERRs2xubw_1EV_7pRi0}-T~1E>zQMNzJ_Q9sNB5w7nF;?33HUj01wjk>aq{fq3$-45}LDyMVF7r|lVT`%m{ZPn`#NOL}8G zBh1(I#KcG5zkHf_#u|mR^gH}U&Dq4RoZ?wH{IK!l9JB?mf4vL6D~yS$LLjGfz=V;A zw(l$FG#b5OSN@t&w8(H{9sZ6MBQ%>F_qG5*6#%c2Qy`UKo)uim4)Rnp5hAs z1;PfPV;=&F!vVM7Q52F<6pJP1lLca+^MKBjK=BHydv$pbh2$6bNTK!lALQqpEZSaS zA~c~T2W_9fu+=zl9L%AS84Fzlj0&6892qS^*w*&4_X}&_R1T!#u1K>cxx%8^*lO*K z_{3RvOl*6}GJ5%QV{TNj!#%7DDI_6i-hZ{)*mh;~MG@K_)-v~1f0lD!P|dF~Po%Ow z2`*{vWc=>hC zJ^~@B;BvEw>+=v^9=3Wpx0$mvF4x;_m^{Lxh?>*Ao^YEW`<#WS`%L|HG^j{!|j;t>@sZ3xgu zVgmnbjyLqCN!C|T2MdFgE!AdEF6f$w)gapDiv$YE7!ic97n6q+_xd!qu2=*_-xaTe z2HDf~`y{F8dxUIFIh!eoYtKGQeePI$a|4Hn{dz6pDNq|;t|6c(loz%6#Z6*ZZLu0f zj%KHx)9NOkuxc1)NuQHLonRq6JRA*;dgU~obNcu$rf^!p9$WO%M()EfVd+!!bTgm7 zP1v1kvfS56Z2U3P5`NN}0r9Ne3me;s<+vAp9q#+OZV4mIJ^+OGvHroJbV zWM@#rNpva{nz6t_mICXKIZZG~$>(?J>_`ckUM0s0z^FI1qcJ~9`>dhlvh)pAkuwe4 zT?#IzBOliLx+8Xug^8O^p+?Dxnl z?!u$it6I+k<38h&KTdSF<}VP5*xqs-VR82mZD%qzn;f4~XgYHs9Ah5s@%jgmycM(j zw^=U&v7N1To6Xd7zni4{5HG>xI?Km;7a zEeEe3BILVSTdJO4C@*Ba+Qb(78RmC?^es>0xEoVh<&W-|=S;QOXbIngS;iOLSN?|@ zaIw)j@F7&JlGg065fu8k}9FY35_OZl-kVr-MdULghQ zUE}bQB%=Kp#!ljAF#U&&fWhd>Ychh=e4mj=(7?f6j%fc6Mceo#I??IE!{w-9cAB@B z(Dv|X)wfABfk3uHsq4R=(TODW2YMVNljeLYdalamUw0aeJceamr>=*MZ{J?-GjcsG zFWGLryJw8s5lAQZ=??bRmSafb?v$6tEq5HZMyJY33o{=cun~~E4?i@0qaHkP6cCKb zFj6`_EuvDHcKN36^7_2Zv^G=Vt^^A+O<^KydK(*_aQc;|^^Za=C!OsNin%H6Iw?L+ zkLgB)qrc0oot15YF_!=FLMa9EDg4gAHEnvV#_gr=5Pz4zXK)G&rdSEk0PJo6{9?e*@dMDKU$1_F zzr(nkM4oeLAMu16nc;E8{t~oTAsO)SwN?2}wFw zt_@{tA?hsFMi+lO6QfJ@*O|_szSH(*9&*-+NLU*eCEEV8!c?Cd^BczxrwZKj$Kb_{ zdCgdoT)p!@^jVu9&ad<3zmVslcSwXAJ}1QUKBUeHB5@rBERqacP0!<_@y=8MNF?k#deR}a3k`d4F*v5_X*?V8J>2yjQ=gnsFSc4WTgRRPl+uIvE1`4gT+fl099ucjd zHVfb96v0@_y1nHj63*1g2`kEw_td?KqI91 z?!6S*`+^hpy1J89X&SV7YScOjA*J%Dxf=h10;t~|^is^`6M2c~rUeE4K+*#cV-5_y z3rs~tP*Hfkj~{Lc8>1s)GuYC6cp0)7K0Dx_8v7~?l>kpt^NxdOSuT7sP_ne+h^{Rt z4-`d54FKX(0=u*x5XprSalIxbb^E6fWzFGYJIWbcR@x{OQPk1Me~aZk2%DU-YM+!o zwu5(Ln&o$_lhkXmUx77jB?Iw7$$Yw_^k^oL{>>bFYr{XBx~whPZ?ivBW`_AV*NHj# zV6FYE|3+iU)i)n+IDPV9^RebSZDPM0BA_N3)EmG<_0*G9YU)-{fxYCL7%Eu0j=e##tz}&}(PVp!2dChg_0bhEO zvt!C;_$KF>-am=;>9|xLE4MHGGA}nD+fbB4my^HL8!M4UfpAhJz{Awi#e?}HhJ9^m>>*fZl}5?KU3P+0?$+WtR|{F4#|^; zZ6?>az{Db0#gadCYVGw-`%n)R7%n*$<;BGjprT4j%0NK>r_&zzZL{Yf`v70+2cB31 zNwXxmzScF{mF-Qjzq5#Ba8LZ&u5TUB;KrTcQU>)y79~rDgj7m!l2y`;ZDF`nG>kCh zq3u!tD1pG!v2o-T20fTSuOm* 
zG~s_MB7VN-8~d&S9TI;TiVKM}rvM+wx!?Ne1ZmHYCgN3y*42`iM16O2(~JtM!Wtlstz)R?rN;JU`1L$X-} zVvd*VQzf*Qg=C_`8`<}X#WBp~f5oP2`hHSx5mfK)#4HWMmSBuZJr;yZP{5oy|JV0< z0fT_}l$i|fPGwhkLOC#_HBAqhzj-ttWe4TnwmvLN>m&)rp1_F?>@~Un%0^!_bozj1lZcAr+Pn2x^%obY9>{&E zaNvsfjC9zY-IzVN!4K;UfL}M7FsR}w<4!Wgflua5_eN7jN|M8KV@FiGRY|huAoJ$; zzq{A1;_0jEODm(0{Fe+xxh111nE}e(9s1U<)aDgc=O2oE^zr0e13nb zkB;-!`kyHu+sol>ne`<}oASdI$xmmNwE-B(RD7Qosnu$=qmVPk1QpV98+&KUm@LBE z(^&)Xbfz_5;eYdLzPLGm>H5MH_Q%g%kwW)adBNLJ>xon*!m#h*1d7SKbF~jIB?*!_ z!gyrY7LVD>9=A7ny=}hv4t&%-Se5p?r|hi3SU(8XBI1huNdt!zQ*>^^^)MjhyCyMO zFNOzecsuoFcS#zgKX=1K2xRI_iU;K82>Kzc9HMxHw{JhvR?#QGAfH_ z5~Qv64h$H8Rdqkw8Kh&t)2R1j36t(aW0-VM(>6n=WYyE&G4L$SM=LOO>SW*T_0xLq zYB2=QKxrJ6Y(?+1oQ?Hn{Lc+cfS+33LE=55__0vZ>Be)`z z3$1&YE@iv>S8(lBN%LG%`onY~w#2Wy{AnfE^YYMrt~=s`s_bTLjP#5r*)NahIiL^9 z*qr^6|JiAertzY>J&TGj+Qa(hRXB-{RfF!dt5D(<)7=Zxar8YlIMUQ$H0BgA_k);b zCijb-PbLV!1epjH&Yyw(D6bVJP=D0CQbszna*nF7=ZcAv`{K^7l8eIVm-0hUz%vv9 zmHRN}ZuCs>THIP$*P*_5aG*!uC1cza>6!z3?>MAp?7LiHtHMXyAXTjqwk4pXSS-=H;$DBhS#7*7mAE-R*tF#R z^?7ZzY4D~8o6dSN49oBCY$c1zlz_e>tgw8{0H@xW@u{`Q2_>G~slw4cPXkfC#)ooj zbe-gfP0q?TgwRsy*qxU@osGwv$zFc>Zu;n)SF0f)WxW;RMqy zKECm4jH;37mrT|^9U(4#?m%aLXv;H6#vl%RQ>tU)&;0C@>*w~~vcKl>E8D5M)6O&1 z#LkvMD#h?G1BI_iMl0cOZvH%bx9gYv1x|F3w*cb4By*Y8^GQDdDM`1F{O_6><^qv=80~3*U>oohjCO9+3gmar z^HbdFd}lf-$RpWqzpv2-)I&W|TGX8XWe*R@|5bidGd`T((ZJ=+-y#|v%teHteq}20 zyC(mp8?u7E(lb zKhy6Ba|NWDlLAQV#KC+_0j{|L!Oi14B3opZj0;OY^&{LurtiV1xl` zg#A0eZe_u*LomLujV1msi*amYQqS8J!|OB5fUNma-O+IL`;#Z%q1TqpjAYLDDwL)5 ziXh5!_2->4ojM9-?5edl=3UBH)HLDpmW(8Bs$IF?4K)nDQsu~5Mb!DyOwKrbQ9Yef z@~mYcwUUhT0>|5_@J}{6!OJ-EeP(ZcI`#(aN_AB6Ogc%$A43-Kq>U)ETwOJf=M^cb z=!i%!x_M0bg_=kRSij~e;niRHlo{x*V))`Lx0bfFr~Cai&rZ{luDzBY&6J;|8W`yB z*9YkSarJmz zwzlRKqu+~UIdb_1ToD}D#11j>lz<{nTFRzy0-+~=#G6&4(aijn!ZpwwW$g{j&nGT5 zc|iS0)z*vsi!RGrvm#Lx`9!^Gc4Ff&@Nl2~_qxpnUV>zpA1m*UX%Sgsw+U(T%DaZT z9ip6NOfJqmI4{(62FaKm4_rvm605To=jG2L7$KA&7=vy@yw|&)#yFbLC8G+s`S|$* z7+Df+p8j30;Xk(HUmlO_8h>0>WA?(l^g|)GJgsz>S;DynE4kgcex!-K0kw_oM5jdjarPtUL3 zzi(BlP^F?WrNs)Gmo4cMcQ1ht8V6ly5U8XNUNSqCb5dxv4Vu12A*)rfASRni!*d+qO;=_Q6BqGAKm)NbCLn4F!2JP$p6w_3Lm_EI;n%$>D(_RKC))hdq?-7e zZD29$!&7&;yi3MtFn`#+g238Yb_TQK@ov}JB%WmdT-R=h-23;8@cj1Z=MOKjd0Q-C zyvTHZ#7Mt{jgm`hJtPg5&0O8t9-lqe);ap*BX07Jf->c2Y2!eBSVw%#F-0|uA1c0d z)K{`fckIZ^M5Su8#;pDg`&WPHu^OMrM1zFy!$%>mMiB+}rmUE_>6o}L4jVoa@zKRy zk+!Xol6}OwL!;4X1?wq8fCT9G%#12jol%0yVw;nb16_|xDVOv)ncJzKwFKO3pYh_T zsN|00?Qu@fdTACCLu?rJJA6->2}bt~AJ3u`0$TN5q9RSmc$R}B6y+U7nbN(DWdaNA z;@-h!<)U02(>7a;Z>`kpF<^oFxT}Kn>Q%aYD!)u3dl-;b{L7V3?I!_EzR_2;?A_PsJPFIyu`+RUBO3YJV+k>#Y(&03DHDxKy4-(9;v;~oYpyz;l9Y=%z_gDSe> z^YAcLzh!fdN=ibNpH<7Cu}ur-Q2~y>9C_~m=SbA7O@$=$KNn$_!0`HzLcC;}xWT9I zFoN;n0;C@CnQ}U<;R*JEF9jHM^-TF)s;p!*zaptmz7TZo7yC2( z5FUr3?RnNF6Zl|-pcTMKQ|NkitaH985QO!1BSg;5pf0ZWMIteW@sqp!fJVjY2g#@- z1vv|c=2Q)Su>%7O+RA+eTiZ{kNxB1+^5V7)=*a1-9MuB@*qXeEavFV8xzn^4doL<1 zkiwILQaMoOoCDH$cr)b{(-g4-MsTTlt91T)g}f_qUY+ke%$a#jKVM@50^0Z*PrC_k z-gvtSmMvIwK$A^y3u8cj+=8QkazL)9Z**mnryOOj9VJ_|We5&wW@PP&uBWW{g}PO| zwBN$3ugbbEb-lwik?}S{if=MtACWLv*DXF}#5jcnuDNliid5Sl zNT*+{@Yf^)c{9=7UjW?Uej&D!j$@BBY7*ESc8D;gXKW7hLZF9!E`ognNRp?T>{y(O^ zGOEgG>zeK^>FyTk6cDAm8HKJ^MHVKDj;3b4bt#!y!XE2`^AsLIO9C~ z*?aA^=9+8H(C-uNS1Z~AeHm?n3U3^0oz{0^M?6`wER1}@bXBO{aFfaVW>N5aa%H*Y zi1fxttzBf@Q~MoT2hIs%29QyRzU_i-yB^qhKzg}V`>@b7=s~-(h+274$qtYY zhB|Z6A0bX#)3eQ|`4hRP*-M-zq~G86Sh!Hu2ee2}sjlt7S0{6_95-dbxL~1?p&z%S z0~G26d!jKou0fF_!l4*zrQ*acK81MhB=uykw`)r_&epWji;?4$4 zq-PD&Ff{V09HYxDq^U&oeM&Zl*l8vrTKy5)@ju;PXR&c{LaI`*J2Yu5tT~`T(ZkNy z!Yed?!>e>?;efjaovn(8(D7}gh%tc!$@W+I$MDeLCM8HmaWN9>P4*PWm>Wk74Z 
z!hc(G$8-9Pc3&RPX!_rDO!J7F_VES%vHOi%C}X~+MX}-9GC>5JCcGskpLqr~YJI!H zSv%)$>iN{5ZqB}_(AgV0wj zFj*oNpC*vSZm2C1OBd5SY1+;+u(`JVCLv!*vM`b@k})%*4%Cm=xV{usFB}3A9O-}5 zJ16(cjiMc>;BMRzt>(Ql6-QSW`&iEoG0+6ru%XR;JLGqGh`pxr;XLp4!;`(*Q-IqpocDlhr8paFy2Fyqr38PxP?X^MO(jnDTp1ZyY{u+ z;IFJS=q`$|dgH*i`%H8ND(nyK_g6)F>gYGEOw#t`_tkLz`qY*aShtwL7>9Voy!bQU zSenvkWu(;bLP7hx76?QpgJ-_vA`;Rvv`Oco^4L)Bd%o@aU`mQBX z42zJhyHDO3uBVAN{1_h}IC9Ou;=;wjQQqn2^SqgKxLDa;6#HzO0UO*T25s(eA&8zT zc(TjDc8ZDtDG%^-81`3sS(idqQYKY3UC`o+Z0orn0hnAggbmC$0s@=c+XXQ6E>QlR z%a&WvDWJ6&`YQd!X@7l!uQwh?{wJ78h4r;)D-^?JEUl_lUU7)B5V5>>HUDR$l|j@K z2-?XxjMHwN+43ysALy@YO&1$oxLKmmhW6Fkl(_+~8UTk<$f4ts0MOTqfHIGn?ko_0 zeYTDPmwdsqpfdCDwzIjDtR}k!JN#!h3EM|-!s+&J7mPL;4GD*c-)D?iD z>-3xolm~TMH!1mZEx7)=K9|SHi6{@F>9-FXR>K3D9XYb!8`DGZzy zKMfyBe|p0V{FeLJ^;8`89U4tb<>^L6MoqHcIWl* zpu4p77e$b4=Q7(&BN~_AbH=3{o8EE>ThJqbH!IHy_2h5zv6xH(3IQ3}vzJfmD+`S0 zxkP5>vgg+9ltNjMk&68*Mf^48muaKdsi-nn%mMj7H%Le$k1tVOJ44+rt?fy8oq`ovoP*G8dBGGCj zrU#`Nps$$LXz$F|F?4oH`8s@s0aPu)tTDB^x!rY%x4aB$Qvx0@am#x{V<%sG2F$;L zWT6qHL{1;lFTAIDH6B@31~3Pt%d5VX1=L_gJt_v=BO>HcJ^hC^d+hmA$+zORYK&!@ zMTVFicipWGp-nBUbbT%?5xW$4od?4y@OQX*4&yz+{X|432h;1af6f5)jQFqtdLJgm zCWIliBK)`0$trE=*Bz4CFJI`iYm^<2&|D9SCW?F@8#4SXb59xF#wYqR?>U(X(TzES zCU8uh+jJ$i*(YqE;KF}g70}Aes^W!0!y<43JvK0K z;i)%LPATKTOvL@olK8daD%*43>GIKE)#L<=zaF#E9{0W-#$OCRrvo-t6neS#jZ_UW zucqZ3VJ1A>?#J6*25@4>Hxj#y?>GW4$du0l#x8MiaGtSQplC@aD%v?@LDZ(t4#3py zsk)m7UuNVNuWIJ9t_d&U?z{nxR?vl~-q>cK+#|SHM5i98r=f!9$Zvqe=;F6B1g7ZH zS_o>{c<TO`JqJqA{3}KiR z_BO3R#_V!WaX?k-0Umn)Mhu~>51r^&It#R|6J4quOjN+y6dOFc8+Z_Qmmh#4?!*s|XU`ugI_I(H71$8q7z+RPA=`{`5h6OOaI_1qJ9anaWJ zsW8UFoP|Hjm>Yk&_$@c7;J3I8L`_uf%JnIw>IGpl%L;<(-_6clKVf)T<|a~ZkuQ7RI#@R7N-qti!&gmn znKNhU@=(tWsBgV`=0X$-_2!WOIw=X4yCUo`6t4>RIo8s?sFWRo_uS$*S*3yD;0P@U zhZ`58nd>Al%w+exc7pFP^gqIXtD({P^yu>}_q1O%YBqwzrpZzEFXm)JY_cUS- zRWT4)vL7op!yzG-uB^^=uF~^mVbULpVS}h~?m!k6aHH5iuTHE}R82F@bQWj(^tGFl z@M>Or>7Dvvk{4r>;qkS%tIe)s({u z2YOJwPQiT4%dT()YcLE03+r`IGw>V{L7*Pck95ugCm)^5QvaL&LFNr@?6_!z*8Ngc zam`q*Muf#MkZ5qs~$s zr96F4AXwR`ix>rshdxtT;P@ocx83b-spsl`a>!a1CZQfJwybswi&3s|vimNmONp&L zS_Sb@919!H?zcD~Y`}(5Dq+YbsOP>T_X+_@h4-?JHS)QMg?my6(ZgS|6_q$Pn@U)G zNWmi~sGo*KDPNzNLdKR(F3|{i3@7qP+E0iN7SP#_RZ&Kx6y)&X5WH$HkJNcHqJb3w zwBvEd)jH6tuhd1Uk`G5@=)D&RLKlMobjF0n_BT>iE7DsrD7)Vh`UWSbEv*kYxVX<8 z{Zzn92RBnyMTrJIO71lCj$DGK?pVk~u8MFGXFV~xcNT#Hk4Q5cJynP9q&0r)-;i&$ z2jWGO(D9SXuMpCC17bw-b(N_qp_3YVdx;Sv%R zztcJ#_;n>lUH&s=8vC+;qc!)_IFD~$;JuB)uF$8w_QE|_M9Sdp9V44)jm~{~y)%Wr zt`{&UcLJVmeSd!#sA4kT%!B1zvI;zL{JKJT6d7)AmusdMdW(Kk;nx5Pb3X42OJKe< zl0YX12r>yHyvc>uZxEuNL#IruRqs2d_PgTMYb)(DShOx9ELC}tiJaN)*;iB|G84`C z^>(mXt*-~{vj@+QLQEqT7xn$`Pv6z+vH{ryX2%}EEb?@24a*5>lWru)-^=Pf2FM?? 
zlVO6nHj-p^0#`fC=!MKRpu_^gOw*^Bm~SB#v>Mz^jx-^!OIrT!`(#fCsg%E~ntX94 zEyGL{4Fh_y1&H-_sH5pDGY)9c>z}JiK;fpIT3yWvCK{$Q1O87JU< z2bY!pzb6tGM^O@G@w|V@?~CR?57qlw#FD*7M94RK546obz;~=+eBcG$>xmn)xK(8MwviBM}&<`YwTA+$mv(rFD6-;z=-(Kz$@!4nodqMf$<55)4Z%DO> zghw&YpS!TIJ1hrIU|vc|`)4gr`&n{NtQd>%Iq92Ssx{Jz2cqRv3>rk_V$tHW{jqh1 z&a#*TcOk%#rvu$GKoHKG{&GOtsfpUK&x}R}>)=<&X%o@&36lF${QY|@_I}PwC!=it z*K|Bve;3_7h?^W=?axV=lNoeRz1pAmJB4Z)yJs0b&m`cbKm4%Ol5_LtyQ;s;aqCJ^2EaPoGpR@v43VfDYZ(TNS+brlvqn6V_(-8yyo<=|4gt^1t>Scp$U&{c_G5 z9mn!&FPekjhOR!r;#9(_d0@wW?8vR&s^}TWff%q+ij61M-1o`vtf);?bq-;o!tpLt zda*b4fa$|$+TAl(k+#rPCogh6O_-BfH}8t_+A=0XRI&v7+k8-DU=I2_qw>w>dt@{ZeDu zPmgXbs@ohNNkwEON3kRk$S-CCv0b3rx5{OZT{_u4Wxx%Ql(C zZ~Z8}4t$POs@Ql^d0+^N+jcGv1joeWWX*rTGTa9RQ<*Y)Zx8H)6)oSkf1T_P)vM;D zV$9dGD^w!l6F!V+(8@@d|aNFmJ^U3Wm+7<$r4vkNDc@AVBqFwa} zUf8Aec?2<}`U>*J7jDSy9L$n+?3JceGNLfsJCnB10edxlsD|X(|!pC zx5hL41?(>%pl=A!PpLFnw`Ztmv+1=A^LLUbEz!wOKhjY|5MM{{kcgq3pTUvL_T+U0 z(R(mmkmx%ihx8)Cd(l;3d!O)mKCBc`++B;Yym>=#AJ}n#_@&o}QW%C_*wE3Kn(y7o zPm7@T3e~Dx1ZW87L1)l_X&q$BD`3j5)DskEA;2? zKW{f4Ac`NhPPE78Ei2F~*%l60rEMNS~cFKl_5W{%g;Bo(HHQWLI5yE+59|}|% z8i$8)Xz4xo5VYJPVW0z1IuKrm+jHK-9Xx@`hvoidON(;Q=7OF%;3Ze+HN|{<*f$CZ z3rjBLXj&Kr%_ITO6G|oFQ#fpFY&vg!N2L3Gx6o{C#Wdf~oTbC=Ji!TrA)dcKZzcrmiE94r;D{I&7E#8Ky{*wz)@=+u`H?{k z&H$DBOzt^QgKlHLB_I}8gN+sf5+zlt;$;$Clz%kFtTtBH@_GXXkQ_oxu!in-^2Db;pjM7&n_8=Owgj=hU*-0ta({F3-{P zp^Dy66!P2aYwEsSo9J|bY}726Oj1L_+&JIUqLCXqQf!a))T`PfmXO)fHLE|_25mRm z;j-#LH}t?ukOX>s)!Nl$Tyi={UfybSXmO}u}wF@cFq9-yTF$EWRjp>js3M}VNb6boK6}FgbKdNNl@fY$7qEMQ;j)PSq%%;v~@$(phBz* zKgIFIoMO2prPR9fz@d3M#7dxtA!wH{rlmqSMy(wyyg>92hEzrq{spVQ5k$K}! zE;~9`E^aC(`oV(wK3KWEWB9}tBzJ{EI+m7VG=F|g?56v#`)IL^*73a`aKN{(o&8~IxKta&I2E#hhNwWEyBf)W5vxX(9Oe9dNrGvdv>I0I{ zQugdedyM8x_$Cz%$5N?&gJCIidN_pSk6luIGA_=m!?9EXM5F}GJ|AwXy11Xk7r`~} z2~^1C)Pa&ySy}DJCx4*5$oFiS926{PSfHuWJuC+)k2}|s=*}|;+NL0I?O1SumsLmi zqU)=~vAkfIVpzmo>|nR7$i>>jfAcx6f9A40v%k_ zlY~GJ9cfE1@T5!tM)3?810&Njpkk%@|1EMw{(99ybd>6~Z)4AP-Di8gKhU77P??af z>zjia6{58L<#=wJ^%rMJDlgA_e8ta^nCx(H-L)EhVqHfacojvY8C_?3}C{JvlYt3pU{&EULa_(2k+N zZNJ5Y{Oxm`$ZIJo!pc-6lm!JaA9Ux z3#%}J4-JEEATn&@Xc*-=ol2Wv*2SX4`i>}Ih5tLLlBrE6r8(60-4Rc$SWe0aB+7K= z1eswp9RhE|91XjVeS{Cw)&w~Y$3mc?%|Q&trenfhYscdj%?YS=7m!e)q(NVE%};H| zcH1lX@k|9va7_eq@kGGyf}{NeW?c9j&EMbuUon6J$OhnI>*FNqCWUHvFdo@(JmwEw z#UgqgUzlHEz^;9Xt6hJx@g6cEA#0@`vTE$PZHiu{$Re0{dwmRJ%ffZFryo3!k6EYgl5N7N~!%Qh1csOh%m$y~Szk#p!~a6qDe zRO9pylCXsHykq9t z62nutBn4d>9^W1!ylGsH%80&_S;KSBwgt!^)HY6QG=6nN_Mf3$%ktGPq3k?qNk!n2 zeZQCDrrc@>>wNcyowNItgsShwaFedG$A|EQ((L;hEJ16*v)1@?GV>2ayE~|JmJpUH z3;Hmj4fZaSH)RB4Bq1SH2`+E@Rvc1~y1j)pNyAnovxRWd}$HFPY)1{|<$fsn&-s6KQ zP&wj5b>=>&6&Mp{{|(G6{EEAI-)6;!lPtX}^w6$(@YiR?^^(i{n zfMMr1phW~HSK>z%XB@BCH`Vz+0R9phDS!1yLlpG0UJr;L&k#~;khuaIgE$1GfPwLX zh;^=>{8uIJ{aKwwP~TN?@6=7uGOS48Ukn3E;UJRQU`>vYs!jrVU$nQ;l|dW<%#<(1 zh~VL+RIozj2`8v3=)jdBw^)+1oWg4|%Ab)gqj?m)U{Ez_GC`;uO7im%CK|>6F*{V$ z+;X0PxZjA4_~f_Kb1w4J zZe4B0$E&z#57^z%hRWkhN@GtV75TIMi7k>W(EwmnAI$y+xtx{*+h zWd_WA_#R^<40#`o;Ej0(m-G*urq};w*ABXIVBN%lVaPu+l-P4-T}GeY5GOtoso9h* zvE=`+{++zAh?`dz5ta7fr?kkm@|sTwB-G_PFVnCqG5WAu3$4&TquU(U|F`ek!5bHVhGW?Z)%4pktAA~3s1VT+RM zW~p|M*7>#MV<-)xawS2vj!58pidT4=3=765GcOfe0)C7JE|N}IPRvyx5pe?wPQp;` zN1(9@q(%VJ_YxQp=q!N!g!P}#XiMLW(IGASN>%%_MUilL?&&BIMefO}bkAz#S=m@l z$WQoG_srK-d*-dSHLYD(@%tb&+J?^8!5jFX^ovwXRKQy{go+&l3HFzuPh+b)9R)d9ARl-+(y#4p zj4?o1I0CVlF-4hS2U2!<1TTn-m35Tt_2bQU>M#YsyI?jo7-Q7q!el7Xqq+{Lu3_##u0qZd! zhxsf<3UarEhG@D{lz6aRz-I}v-%Y}kK1j=iUadeUrmXH-Kn9v_5n?!A5#mwjQwrng z)`xUG;db80a-P%hUkiQM>Yh~N^ohxpeC2tevm>URmpj$-+$aUr*P`Fm(pCc7IWRBE zpR^`+M-K59f!VSwaSEv_VUUht}N}VYf^F#h! 
zORBD8R{028bLu*i)MT?j`tM8O`2nd>UZ&@Vqci3v-UX(z!zmmVpbCy6;rmt_t`1y! zkf7OrMl&BCrKimC zhI5Oq*&<28IF3Qj1?W)N{;rPzowq!W!Goece&h!HCx0ETW7``ba1|A*{l~JP*LoyAz z_-cB1dH-qvA&jdV3U;LXR(Sk`bNirf?Dt(}n$DfbvH!}mdM|)YJJr&6-3X{EW`$W^ z_}{xgF8SmhFLO6JxPC2IU@*7!my!s7OC5o|o@aHdWNtue(B4`Mo6?uk$^2~5GB)Dgv2@a#anhOMSq(DB${v>VLRH=nyld_ygkPeFXB zUi;A0><;gs!J5m1O`RUu)Hd3)M;G-&u>EbMo?eq0iQOE>^jsW*!YM)eMrEOZZ)Wx@ zysFVKZiP7dLe(FmMHAYHUZF@$h;t4!c=v*_8_qJcZa z*$oi01w3aI6qJc8yOH00GE5Ql2{XrN1P+yUw1m+G$bpixr#|}p8ozAY3X@WcY3&K$ zymykH{%+QUD@QL=j1d=|OZwu!i#TK=a7PMRx`Oy!NiTb$9^IqYbIbXk0CxV$M=I73 ze4|JylYQRnRlmXA0?N2$KZ4O#4AXv;{wREPP1$8HgR@ zYvoRHIZ2OWeipFct67&&Dl%qboU0VxA4P(kJX1+c&}fo4e`rq=UnuafBk}HS!H>{m zNo$K_7iRcNw;#8yA`)Xgx9XH%>S2a$y51C8^sJiH>*YwP;7O^(bORPV5!VMKFs8Io zQ8%`CW|Je*m&mjiY|{9PS5;;9jSGL|H=7ThOLKOD5g$;RdSk}7l*KpvQdJYU<%1^q z*R)A#%o5fvS6u|=HCJ5oOFV|uq_)5}<4FnF7W5bIWgAaW9|)ETpAV7Wqlb9Z2fP;yn^ zpsO^MWrf%3;3&@J?8bjlRO}8P zwtwirL*mY(3Q;x^>Ox~e9>AU9Mzpd-Hksy*tcAH49Pc@H(v^z-f-773_dhF2Eg6pb zcx8_zaW-V@xo))TD*NpTdh^d=$Iy8RGQCn@icy^H91`ju83c6brV^iT2%%6pMyuzN zHa0AvfhgS9*9VBE=mDPkT-)f)PnZe2DZNS#!3oP`3tCKcdQJ9bAWlDf?g2YI(`JUO zf+8XWv@mj!Tyw<2_38qQ{6j4Rs9f7ui(ZKJjndy*)~-hz%B4MuofID}(WZ?(e^DN! zN8bn$)D+tPG`x&%3T5_k?)~iBF#F-Q=(5M+Di@WEszUI$%quCi{80Q$8&8h~i9J(= zRa+l&1IwsEqOt0Vz`_wcg;UK%_ZJwW7%exSTsQ*GMn9R*P_ZCl$I@gF=e|dUTEBR? zdMWXi{)0IUH#*nC_HAdr%Sw}>vi*7e z>DOR858E@ajE1jleK(oC`sCq0-f&byTT@$?(W{a!q2dbNvMX9U?R&k%?rK1jEl=i5 zx;3XJZu+0oGS>|W@;PoIUhlXT7QXeUp0Dh7GB;nK)|oc`;h5@-EqsRQ`PZa*&)U&P zadlH@5NOGcoNW!aT>jD<9BT}+ir`POk5iuwoao-(zxJH8Oag9cxKwZeQ*}xi=R#m) z4Z5SiDNF#ahE}in5yGJJC~X%RWxkvEz}GiQ+#p!m1v%m`x-^aP@VSm5e@Tjy)-ejR zg1>?_xEA3p_`s|<8e`DJ6{R*+NUc8j*YWbmjy7TH58Hj$^>EY=;x}Edk%`8j+)ZfA zX}?DQs{7`JYeOuDOy7Ga60T4;$uWVdV9D=_=#}jU)7f!Hh4LBF@@ne9m3j(h^-{+z z-ec*T)u+sMQ)klWy$D}ny8&R}^KMv7WkbU;U}dOyk@OcVbuDrc-;TnmSfz;FEL+s>;P|5(yj$c4+TQPJZ zj^=B=FZDq!gUxPjX+g^JQ#zI3H<+|FsOI!hktN^H!msfA9M>c=P4MgoN8yfItO(^F z)(AN5#XK}7N?RA8kF3;u8n^E1wzztaE9gWc&WTNJ-H++{-a4JWVG_Z6Uf*&ZO6bt_ z=(2K$52@C<;{Ebc1$I2R@u&Qdsh)Ml+GK0_Ubwp4oI^MFksqnA33Z;pQVv%2OQQV>g)XX#~b7_NSc~R z$qxA3?_$iW1dY_%pF==tG%u+miDY^`y@Yz0%~vE1RoqI3_unzn2a|e?WtNzEn>6FN zggi?v#Ie!H!wC7m%g+8g?Hk`WP91-0Xq}!7b4)KT`u6;x3Vbt^TTJ((c67n>_%k}| ze0{Vut4wSK+AaQv#NEr)>RsklFRbF{e1N+?tkl+$4o6NoiO5*liCt=b*E18t?Y@;0 zpZodS@ru+g4(LGk6ogs=>)w26?s)45@TH&oHRqncRvWxzZuUx!iSsI=NRvlKlpVjlMwQbi!r0VpmR}~K@V&3{4H1Pp z`?A6Np5k5phKITs%)W2))i&PXZWS#cSVp*ir$we@`GcvlmKj{ zeg{^iGRrvDv$t&GfUKw)s9?qTL?ANJ)qIFd|D&mCyF7NU5jM>~^*>zz5}igXp~^{q z0V`4OE%UgnKYdnB4HUb`3FG1^o@OnU(VX zC7vZjyb>o1E6}Q3AIa|z=;MkvBOQW2T9iiarXU;oiceJ&KKj1U7nWSVbgz;sFd_84 zn0z-gJNs;Hi-iP-;e%!nF4|#yt<1;~ES1w+lGAqTon7#aD@*Bx?CnMHqHpzl>+GpO zGO=XPjg7GxJlw{rAUF+Ve>@S_Yg*EjInx8jplwka@{!BW z+vPk|*N+3=9p7Ho;7h5rmeJ+(MB?LYVC8rSWjXd@&a503tU2;GgQ5}^@%=# z-;Otu3--fJhi9g{8(JIo|Lw!9m%5Gv#}$GV$D^Su_h=)9n_V9nC)`+QZt|HDI5Z(C zMVuZnn^P>Uvx6E63h1CiNDFFGvzf0d@3`NZ#IMeJAhWkfoGr5sy0|pfy0g8Zp}T*e zN)?ggvLcqxrX>{B6P;izi4&78 zi?*g062`cKVOH;_2s(Bs7)`8Wk>3RK^5W9YW=S=U7;E>xRvbPEFJ|Xb2l?Xr<92GV z9g1Iw35d8z-RNTcqAr4IqX3&iB=-y`$@Y#EI%PAOeG$ z!G4AiLh%S^-}GUPw0w6yyYti!@-aK9BBERD5hbxiW(sD>g6?t7G&WwEwi!rA)GQpn z6OI{0=v8`gaY$jZcIz`ZM$z3|=ih(dQf))^g!n(j?@Pif!BUj^m^Ko?~Wxi0R89PifD0F_qH`Eu^badnut3qJgI-@)VVK$ z#YNFXV)*zGe0P!14>yaWC7d!ICLvx*}-of;VWw!37JgS zWStuwLg2X~oz=nY3owpGyjpa@)H3v<1K!mK7u(Xr2{rZxRk2gl^*M zDL6$_i>qmq>xp-zaI5oVO-C^h2WKMm$bAm&!z?hU7rxZoH70S-&9%E-T?NDVrK)@K z($pfp68*?M9XY<_XQa`bOcrnbS6KhQdAb%jA9NxjS@U%k9@cgnNmi<}#P4tZ9I|Q~ z^iL@)SVnx8x$>kywUf{Rv+-hd zsr-L)!?ex^IJyp#mGa4ufa6E)li3xybltVk5|dlN|LWV&E4DhjTY1U`6XQ-mnAJnJ 
zY1^3d$FnBs_2~u?k2PK^JE1=U8aaK->U&7Ps^^x}pj>S4pV4YJRNu3gBljtrixCM9 zON@kq`Uf6SN(9BodV6q+ZL}Ovyy&{raq=>&KVqM1*@6P(rWllz4KY9ST8F$BRb=@@ zWi~6&B9q2MUCQX+meQO?Ii9G%x@TgF1((pd9LUdu$exkNU6PYzWh}ih0Sn%?f=|#ctjYo}3#Y*Sh>&`IfI=`yQPZ!@$sR_lR;k5m%b3%TU>yw@Fj02*ON<1 zj`l71=ylxTRYJ-U!{`PujVsVDVloItA^z{4BV$ZWOQXAJpYrf@2oRB zw&Qu&FnbN;@w|X@_lcWo$Xztwa<1((eYW?R%taQW98$TzlQ{M!zVw)|-btI!YUu!- zk|

#>5Po4PUQX9c@)RQ<|gUtf>3DG67~!yOwUNaO^&;6Y0*yVigM8k3I&lB?{~H zdL_&?g&H5H78%5u(gRw`sZ2lT@>ts;vReo5epfTFh^W}uY|s4sHAW*IFB)xZXE+5P zL(`t;rF^rsFTL7lhPd4k%3jUaH6?M5{Lc9$mEeHR@TNe@IAZ)WN4URMiJTxtD@6Y~ zzc@r4gq>lD8KAlx!)CM(n*OEE6T(eWJ|B#-anrfRYvrh!^Ur>!zQ{ST#L zTUJmM6m%?V_-HXqUpOQ#!I4|hz$R6hTQO`_LEM};&7Z^FU_Yr4rO0MSkmdcqGDeZ9 zv~aUn4-RvMZC(ZAxE*eqJ)wEgz$I^fYN}`D@h=~H*S+)G7}9wrQ(kWFliU3&m&ZaG zApKRcWXG+r>Kr%>-?Yel{fJ3>zAa1chn374$clUW#dXy57g4fpSmmYWdD9TR1eu%B*+FVjDjIySjLA}M3LVUrQu40BIlJK9EBn(WY{=slVOhqj+ zXCqy7>@R%J%{{NhuYKu!@1f;8EnHnw(|K6Q zKWOFncw%GG9@3YBRBQ-0oFcnNLc7~?=s@yWCa4lix``{Ljbu86XJO%Sh1l++P#Z=G zK4h+wzE5Y>Er`8Btsw~+LTFnW+Lu9_VGUpEUdDHwNl!OlS=dJ6B!J=g8grpD&8|(6 zLg=q<$OflFbfG8`Bvnx+8Bsx+Ib-XHc+AEfshyqgi$a2w*#u$GtGiF^8t)}l@V2BE zy~a;lg3QlcA)C91Pgy$JwET9dn-8%Sgo>ZIdt@CfEo~(Gn7Q>XlV33li-EiSs!lyiX0rEwZZns;F7XJ zwH&*%njYA19GE$ZnzE0bxT1s*%0hEpDPgL(VhhC#&J2`eYWd&{R?sZ z3zg?mJwPDU#knUB;qYg%vJY@lQ@j}Y`vN1}>$LBMQ;R<68N#+L`OlaZt1>0C8^~H&F+B^^ z3=R$Lg`^#gavCo$By48~zIdX$8JdM7lQ%z74ZDcD7z^xi^)wBgt74UEr0C>QO>p_- zSGTL2fWD2+Wrc=ZruSRXd2{Sz7H@gW;w07}xJ8~`J7i<>Qp7_)K`$YilXh&u9`Gnp z9twQ-S&Z2wYcbOGbG2Ac9LB&P3%kT({mm#-X#8&a(FL`iHqTQpYm~H$avG6G#%9qI zCDXdE3Itz?zKxSPv9&K+#=Y+n__R>kmvKePk4yl?>e)(QYDzT(E7Pf;$oJ63=$SjI zuC7jc;-o5@z=Yei0O{`0&&;a!7t+U(ap9NsllxS`%^_gbX-~qB@pi*@PUZ8oJq^QZ zul@deBJ!b^dC76lc~d^AhebA9^D&3!VK;OzT)`OLoa;pE>a7cq$@M)OWYBjR<-x

|LJqBbbT6g%i`Uy z_@r$L)&g#$;XFV8_rfUNB&W$be_)US4zJP%4Ph+}uuUNL2dyW%gJIxs?g0JltdfuG z)o&4`%zwkj6f@I!r$kHxl`BZ+RFfO%@S2aXAJ>D9-B~B(wY~M$uBah_tB-FpVMI6qB$DH!?|Y3Zl+pX@u>5yLC&{Q ztB0nExYVd}Xa+4kuDtjM;2zPH*f+Qg2+4JUf;{l#T(Jo=Jk=qQQ9vSmdO)gzVGIYg zMUuGjAhj3-U56faIJ=eoc5|=A!6y-}%)-WXc;g&OXCL+PLb36T1w?#!o6%{f}ewV@wp?+2^!huN^ywQ4`7Gq>iF?w}oz z(3jU^b@*#lWH&KLXP+#HU&BJy^*k~X63es1o3)Di*~~(C)Yq@=FJCfR(A#)Z=RR}N zzNTc!`O-xVAYUqZUkJ8|%=ArE+!W{eGUIP2I-3F%Ej3EiCSJ0m?1jVo3+i$-^?hH^ zc2Ti`D2>_1!UigjdvNJV{hrR=r&YVF^LN&3Xa7+n)RB_AJZE?+spB0rkw^@pZbqhc z#vwlf`v_WtzQX)7eL6JeN<4NU=DQ4x+LZsX;2|bc1-*!IFx}AK^}(Ya6<(9BxUF=T zJ7?X15sREY!-$#~)U!`DLH6-XoQYDlm|S5IpU2$VBWFuiPEqSf4#3p~Z1ND8|{eTwlqP+d?7(6%`YzFG{f(tcv`MbZ3mt ze?SX&q3HPODbixQW@L0a(R5LV>8tlU4Uu3~ z41QiA%3%D0jMUv9U zAlA11eSNPfg?Xi^c>Mvs5&A68z!L~=3PEx4ym%_IwYzR;?TYN@4I`{ryu4vZqGy=M zoOICwEjP9HsFTuM=48@tcr8By;_Q`F%jU&%d<|ybe zK6ecJkpP6amf6chcI^rt(z@s46d%};*lQ4Kj#k$ls9f<0-}nHT@U>(C!pcijXu9NY zU#6J`88JLI?gHBs>&M;--OfrG-t;`zJ$7l{6S{nc_?WVhsNPaYZ`&{Yze=nJBF!;9x z1=bUU*>W=+Ga1&kN5??hw*2M#ZX&|bp!aTPB(L$p51vUa>t(xhBVG|WGmKdyoaapV zREK61K}IaM>=Ieov+@_RZ?ojFNF1@?Uf&0XO6O2#b$P));j_q-^7EA(aI;-p!AC+|OCyjxd#@NW(IRak zLu6F{KP-T{y1RE_D8?ZBrf!>S9qPH5gDKWR4wou+Qa8&CYo4?~OeyhUKbtPyqk~J0 zj&Vpe39oTU-)Nw_s_LPB<)Yz4WDOb{9vMl<&)3-dW0v5^%+`NX5U<)sGki07)#iqn zhzmMPbNd>)DdUPD5vaDzN zT{5vY$(XaeOQx!H9bfDx2FdhgU7xc@)!2Q{)QB)K08Xfka0#|In+1-7#V_NoS{leK z6qcndW@CZq5@r^1nNY!DX zU!;o6E9~8S)%%PS7qzAs9hWIHD}1sa9i`lg+x*KkFZzA_&Y=Ov`JeLFu=c;P^1G_Y zAOs(`OQU-#N6!Y@;b%(D2zkxc$0eersVV271sBWxyqnt;K}n;xX~tP%O8(fl=b!y~ zmY4Mso+2!0*6jTEBc+La1qsqI`1Xf!>^q)^=r9|$eAD&EVUfz8s{_kcZt)-ji6tjL zhqetBs58Y<2>1P_K?Sqf5zxL1WSWFV(H0dfGzUUm=;vwelKKM;*)fV=GF9Lh2}bE6 zzwI>0#NxuzR;egh`M4?0*uK7xpEO}9H z0KOr76%8K9xRU>C(k2rlVuEOg%H%!JInMRu-I|`B4i*+683TMuEso`FYHqjRE%2d1 zRokal)MFSTH?%ptENoK~9YVj+9k4&(wvRGMZ5&CuW`SC*G&QgholZYg`8*;`dkQly z5#`DwtNPr-t%ba!{Dz0RTnuI)&cR0qDInxE5T8pVUa_!HCo5sri+A)Cntj>nc_u6< zJM+xub#A^)CUz?A^9G>`C$LZi5CS#Lh>afMYRFIoo&rxnbV<%7k45h$M5iPy{UBq$?GF4wes$FeBe<(`EnHiv62)dEHOcAL#+#-$cKt&PiDA8Y? 
zxD)XZef45G?J(zO%I0|6NdNDI_5OhrWW{kcQ|ZHt!VE68u212TF|s#!1eAN|yPlFgQCb2i`M zek#@TcjL~iYDk-L6D7{AzuZoDu<8*etj|!dCC@%j(6R$CAh#f$U~?1$K6|Qxf}4Xy z4Hp>0;WFAl3-o0{Oi;;E)-8(bXGcN1RXnrhz(ur2wqjZXSQ71n>F+YoU#&I(;(pu_Pld4EE z6yc(q@X&&9yNA4~Pna5ldn(F~RZ%bc^(N0o$nT4)NM&(B`*-G;FQ8?exkPjTl~_*` zJ3G6Y9#DX#!uJwHkv{?8op>t_zQHpT7&YM`-Eh`dsuCDPUeJt8r}f{muQ)a2bs?{G zfnhxZZ5~GXR)y9P@ASK^*YwZ2a(eAuW^+dWNB`!iq55sX@1MQ_p9PFb)*pb&?;RXu zqELhGB}u1vSK%1oK2RL|2Th@q7#ONP265ac07pBJz7}C3hXvt$p<(B{qL;*6?nu4u z?`Ph`!0Qhc?N@63j!KiY^PowrIl+{w6?dUjc}A zcV{RoK4YoGMwsmJPtcr#tY^XDKSHdhjK_qkAWLlS1qJ3g*zC=)*?|hV^pm-4|6=-k zZr2dRZ=$g{Qz+u9rb}#{TFY&tGu=N2DX{jbLkNoU+?rdTraY z+^!DswfT#&2iKXaF_*30Qe{z%+yB7eq0w-zHI`noC3>b}>F@~Z5|VKOKzo+JZKE^9 zcM@*N`}{!CshNgK-o8mbcGEK!1KSJ6dvLhBC z)rp#+0a7~)#Q*;+Sse?Dh;){lu4HNG^qsK#;+Dz~iLtM84o6)e#8b5dIgI8<@Gy(F zD(uwe#@}pGP0iQ*qJ*Ka>)J9zHG0&Bje1=y4i`W*+7sF(>Z;Eu%>O;wgN1lYW%`XE zKX&`Rrv&GV*U(-iVJA+WPq#+5*u7Bp0u{!OBgSb^z-uLL84-z>5+_s$KQ&d5a~TM9}g#;e0dJHiIt zzrpO~7yB?amfEy*LL#Pwy>FxQBt~Laa##A4xZVKwAkRzH;*m2Fu~&aNOAY?(JR7z# zT>o$9#nV5$nA{t$RZ?cIaQfQ+x_5?xQVjnVp1w*vqjm(2XCNx&Zmz&DL!w|_ztFIu zn1ms3DQ$RQZ>;ipx~8c3i~5m5%_ywifZjlNeDbOIn#fvvi=DUzH|59E`^n__m{I(TF@XcPL7U^$r|h5L?j@c z$D>XltV+I$KyquoMr8F5f`FrTji^loRtpokc7{yQ&et&UHr%lG0t&{Z~ zeVidyhzfQ;4jqOwBf4PGV!SoX*zn)K3%;gRX*g#a`M?FzAQ9_-1+?uF`KiAPMl+y~ zNEw#>UBm4tJQk@PVob9{jMmpZq|K3;>d4k7?7nehgd>_LdLkPqe4{$1;qXvku7DD1BpN2HfDS{- ze@vCiHR>ovi5C0Dhq>4pf=$$8OYMPe!SQw|vSK#Q63%GziBQvM1E9??8%Nb*IkM|< z3Xe)iV20?G%JN}S9jURgdec5rW-9#}^{wp|@sqRZlw>*aXez?WZTs^faA;=6+givm z5NLU^%BE7t&ulYM3{n0T!wdw^iKr=TA`Ak~QxrCvkWHCy|Gt@^o8Nemk5RBMQa%PM zKp|{!QCV;sSyoga)FTFo7XU7izLlt~Frbl-)9hePB1Pbwc*vN?dnI(4Lw-1C$q4Ix zQoj;m;?{t6OBcXTBS#t1z25{G5+!yS2%;h-OMSw|u>wXp97lB_F|Ym^=LnocA-F@d z1)Edz9j0Zog(@P=2M7<|E{#QVz#_PBaVp-CnHgpGBBlYFWYRpv4C9n1xVK` z$=AUfo=tAwmEpu~$WzFy!G`fc?c=TS({O^TRMKDso8K|TX3d)xbL2R>dbMd0A7!Sj zW^ihTF#A!?XW#4LRP-90>yf){4khJ%Zo5HGc7%j&Ib&0}(`~SpYm=g>d%fwOg`>i< zV9OUAj_$^MK8r`uB4R$c3@E?i6{He7a>wtZj}QpGfqqLC8UF?uIdKBBt#r9V&NIj$d&nU$%BF{R@oZ6F0@+EtluAZ>ybyjF$@&k$?JO}L zWk{QK-n}-PgRIX`Nk3os-t5KcfwP(-x&tGX&NXIt9UyN>2n0Aput${EVACo7lmH8& zOYVFB9F<&}&klbw^4Y>{I*uDJ-luO1=EhT?eiLq%bO#Z$rI!EoDT@KrhjaiGx2v#|IhqS9u;Ad#%PSde1J z{{)8ig!-^7o7Y>VXMGyT%%w|M!AONeOqNDr;R$Ka1W^%|4fKC9C@gFQi9pn@n%47OYIW20m4i$*v!qQ>x z496OmmVI4XMSdP@oR<1D72^5wn#1;-#?rub5e)HZqh8tDHvjHAG-U;DTQcf4fu% z8(5Z{IA%F}%_pO;2BT2K|in${Y7EVpA~z9MwKCyzKxNc;25e~l<0TWX8gky>-<4XONO!e z;x0@yWsQoLxGBm+E1bupZTH_Xo#r>2KrMPT#dqYE+;)?_ugzM;kGfyUapT3mzN-2> zM@qXdTXjO!e4kvUQhA+D?Wq#g_)njp!+s@b3yP%*7d9g?MFfI5a*F6uH-E41_<*#o zoN?EF^YBUN>9|-h=_H3~KoxLJZ-BN*_u(fE<&T%uO&S1n+$0fWlT37B3O($YOIi|8 zy0O#$l{a0^+Wa))YTj=du@`n9P^#gGK!$@FjnlB)zqVgxy-0f|>to54O9OA27hTX_AhI>tx&YZyMhx=s_1D$z9Y*>B|}qF(>q79h=YywmlE=(lgD`R)+%p;Y`jc&FckWASmhC>L{e z?!BJhV`HUFl{{vGeTEg-65Mk@hD{s z0`Ky8CA9y@GV|(>aNGK;0=O90eq3T83+&X zMr}y0gMTwRFJ5(_qePJCaz9moHCB^pFKpyn6cLqe}-!u9iQ3B5u5t$hK_7;|A z>FKJNo1CZ)AL`NEoR8;R`pvW@^ALy|mA-gXN5y1WvC%9V^CeS_-0+>Ty1Fvr_t9V2 z>5<(CG8EbmP0e=xCd27?vA)M@Q#Q+QYIw7=pYJ5OcKc5Tp*k#07bi-ypNL0#8EmAi zWJ40>%Z+&SY5GBGkbakhzNCw{!}qXw+bI7g2?yVX>_UV13*Wls-=xHs>V9LzXMW6X zWExdPR=G@+;1Ge+HO-rmjAG3{`la*_0~oOuCV4u;gMw21r=hlN85xin4JDvIIZrzI z0}gmM_i?*-F3wCcKoRmTjZ({@J5?^-Piy_&SVw8*x+e9zHmi__PT_q@(p)F_^7x$m zb~&mSo6*oqlNz0s|5~^9OW~FDQ&j}F9j)EumSMZSop8c>Et8#bs1p-2M28HUacHyv z1N{uXS0V~B`Zw?12|2mPI|}V4C8B(XVdmqG`k4N{OmECv>aTuqgI->$x!mH>g}hw( zXMV|Uc-v)#>+Vvc3-Tu?z1eb_nY9PRf_8T3;B5gdw6pxa=iAVbLbQ>kWdLXq;hke2 z(F;FP&qs<2e37HIN1M3a#NU68I%mg)4VR3U&9>ja=C(B2@ISL7h0_U6P=C3+eA48@mt8*8?{b5{U@j{C}bCiVx%C$s}Aewg&^C# 
ztR&v6S6;yj6059aa_d8|-})Z@bry(gPfbk)O{Lo^%R_~?>I42(KAbFprO?>@O0nr8 zrV51#pZ5*3Vb|iTblp3HL10cGriiJy4&BlVnXS-+#rJxu56B>8nO66>#-oHR$U$)!-KOl>U$d*!RL)Zr>A@TAseGk~q;To;vsKi8e z_fqcRckj=i#wP0uJUh*iD(6m=1rMWg=Nd$vV+*?FHZlumrm3m9`x=kh^x6H~OCAsD za5!(YJde{ECK^pX`K>PLri*0UqyiQ&f+aY#U8@^FJ3pE*e{fgN(idbp$Ob2vr^o+5 zf8`}TJ3Bl0rUI`fQ2#%}>^%5&VRV@^o zrC-rgfg4acT7D1n_N1gFdZ=1~J7IH&!q|$O^m`8t3k!~xk5=^oHtZ(?8O2CHV`8{} zLx*$$nj!!12Xt^)gla*PEjBUprXUk0M?=RzztVYp7i#+MWXT!OVF0+)&5>UfJx6Qx zPeDB5nKQhT<1@$5+Nb+JpMmlL%(!Z&f=@wB+;s6utRKlK6A+*#H*fAh*<4-i`|!rS zD4ZGR(W!@H6^jYCEUN);36zuMH##1^AktcC{Wb2e!?3->jyR=Ct| zXaS}ltW!xwqcDX8fWb*DRKEd#5BYTx5eUJ&^esRefxV7p#7feH61PQ7OiaAM znad#wL?ec7K_sTa$mqqntN3ot0+eF*%OJcOI#XwunVIAJ!JW(p*g}3v&7LG5NH3_u z%WoIe);W=te85B^lw3#Jm?olZ0k&9O(6$bZEYOoMg6mG-0?)f` z7y|#}ym0^xo(xcJ21BqriL!8$Btup01a(!75Rzvbtjd^Um;3kO%kcx9-nl05&Z9)E zKL_yypiElAsrjw=&YWA%Ry6SK03h)6Xm{nE@ox|=rbd84Bz4O0pF{XVV39)S1C9YQ z#5$00e|<4tlsBsn@E1-v1f3uqZEetC$cVn-X0g}C`yw3#R(4m$7uuuw-J4!{$nron7Oyq;9bcY=csPi)&c(~qtggf(13$6@K>PB+Y#gkEj}r_a z>7z$NHU1td1c(G0p<1Fc1jcz}SP;a}YIRdNT;N6iB>7Wr2mwr)1(6Au$CQHkXB=L) z@5&o8MO_X5wmH}Ney7DP9X|W-FS+_yk_-zy!Uwj^omV^1I5InJlfny(FSyO|O6TGX zUjU@eY4d@-9W|m2S%8{`_9YcU6r5;7-jlq8PawQj#u5}B!Q3@IF%f)%-H-KzX^R@Z?=U*Bk^f{I`C6#fBdq9+J!pNgk_k1_6ELFzV$eaJeHX)`4<{_{XVWP$;>A zEC*>Ga1yH~5)VNvCItrXung|OARJ&&+(?NQlB*>NnU+-lM-y4@x%3U#0&ABK`(A*k z4#^{q^xC>QOXNm}NGa%CE0Zi|!S?k#oZV~P#sE!yZUT4=(&GQ?8=7qgI_QYN<6JU$7BgR_sErswX2rh(>ikdycr;cZF-B{^@a7tOk$U zpurUm0I_anF#&w;vB*(TYAV2}p%?wie;rtu)Cdx*w)lc1Jka#;#CczU_pJ2gJS&=g zt$;+J6&}s5cE7y5M>3K<%JlI00qm+HoS_?#w$@G{pcOuM}q-R z-ZRyS%ve#&S|8x~8F~Vfm;(_QrNo4-1@kYXQ`V{nF#Z zQqY0<4n~Bau<=T94;E`ePc5`MsZO6;@U^Vb`lV5UN0CHfNKl*C;)bGpaqV#aodZpo z6kcd8^InNgSMnu15!8Ip{*u4OFVuOwl3AC1c=4Ldtt#1u)gW3`(I zKGeWuPxjaN(OCTJYvc)3!1fdg;!ApZAo-HQtA>I!fYsB}lOvs9*$GY)h;CFzOP7BfuMN!P=?xOZ14Ukn&vU%__gW+{|kA{TYcln3~;yHsH2CgtYFg7u{JLy3ejq}v@^*sQ?z|q}2D!Q}AMn-2IKYh2V)t3F?(Od_tY28k1A4_~E$nV~H35CWMpVs|tIt=h36SwoQm!AG?tBgG#y zx-$O@!8}D`?7!$y_XMGK7y9e~(uxR#Xl3#=!P^lG2x`vX74MV+=LFI!gn`GAOBK`E z@H+?uP3AQXHDlYglcp89&+h_#1NL*3#}PMv_M0ZP+iXfekMsFX!Y%)r!*WFuwjPj# zUMS+&$cVi6>i3gp1O|tPLv}TE!hWrdl!r*>FFoFUEp?q<$ms}opVe-vA&4Yq1;=dM z0VhxZwx9d+oNNXB6&oQR_QP?7*fHyjPYmf7WY7>XJ@{hx}$7$-;$ujXhPU`MHfRheVm z3bV*5`RFI7{A#C3rryqD6}sJTv#s%>X24iAaBy(&YMg5MkA1krLJ^GF4RW+rR*SmU zq++jbuTCf)-FVa*P^z>DPX;!~{d*Zg?`<724K5mXLCrE`azM~?maUHKNG*IaAKs*3 z1(yUcxf_TElJG2;M*oMFobFYp_lLx*8%nh|uLGb|1L=g0Fa4wp5V(3iKrbWx3RucD zxw*4Z(%mq6{fE&6i3~RqBexX%S;3IuXS~sMQiy;<*B4$-0=9y_mDNXA8+g5a(HJDC zPry(ZhGIbb15@_W%1V>EMi~4+S|MIJK|~-RfhO$`!oUP1FDkZQ!u@p|UdcVQ6xev% z`5tmAfSz8@P*Rw=z7LL!fXN1p+sV~u>GZG|A!PpOR(ek$5Wrp686RHz+;4Ang01;6 zh!+C5^RJhGMC6k*`HtLD61HB?$}fNsX>szAilbx)OE8RswI3;_NniOvb;rR*#~%BlznGYp6K zR>tMt<2gV%(Za&Q&Ta-Y7e`QW(QFbDnNJl zEl@P1!tmk$>^(iM$|pgBWwK#T1v~#veZVINQ_>`OqG1qpA3%Q5A|NZOJX&4sk~L&n zEB`o&oWSk^gSYDnn%5Wkq(**1HbwijXhIr5)R0N(+&RP=!0a zJ>||dZEfu^dafV8+lPjRQbKWFxDp^S*sq<&BB3-EK7Q3#+tu@Kbt@9);&? z=E8)p7VL}T=!;IbEA9S3DtcDZ_6zvX-j3z5~0XtOh{?3W2&VWpn`ZhguJI-wlDdl$ybHNFSfqEZj`Xx8 z0l5@-EuJVfVSLQq7{irwlCb|FD$7U5z2kx20>~`*K$drJdRVe0WBx-Vk;IJvpP>2= zsb}iplVCKxraR=Mc^#>MvV*D^GL3}e0NA&d@YKh~#-h01kwfg|e7ulFN**DyeEky! 
zUpf)+4;fHv_&GF@T!w@P%E(r4|>a7N9PSO#jD=`E)-VOPmcsR)XN_B4v^0YxVnI6FeiTf}lUlSHJUY-K!81`!R zPd&F2hps*3!@B}5yY-xV7bH#@Y$DJrqeT2~PTlnD*DvTbP^+y&h5G_6iRDa}a)`{w z^(bvceg7{}dT$e$%fE(!oL{WgP?5s`J$IV;1H8aEB zB2_FT>I38}q@|gf{|SmvsWRCcPv#XTePJa(T@T@kkB>hGMk`Rf)Pr0&up?93&G_+c zSO3!$lSMSZ-FYvMxIu^oOv(&o6X5FZ3Yibcf`oRry~i@Rkvf|}D2 zOa%zHj+6nmybn!4sbERJ-DL<2`y?H}e=f7|?Cao@z;ve1(|>>cC!CBV&+bHVs|>e9 z3N`Ko9Mss*J83{$!H1OA9SS2%J zhp`GAyptuIp>T8_L4$}zl}bP_74o2M5SbMhXfqtctxUZ34+!PCM0-}o5pwTStQXD!;|Ml&Mb(})WRy^{kQ|&9w(X*1 z^*Z+^NG{rt9@T?;4(V@|eAY`WY6#>{NV4J(?O^#_FZtnNK{}@Yw|xCMx_!r$95gLY zUQVicI9d1Vo%?&mTRS8IExI#iOREYlk>lesJnVI^!eLtWi$&6G12md_lGY7@x*uXs zJMLYN0+ED`(%C;3N`OTOtJZsOSU39;LUnlO6U|kV*@X4N+BF8#;wxGgmf59*Z)NG@ zLu;RdBm2jmQf?!SM>m%&3~KT9{bRiS_x95!|J;S{QffX}d6!GOUjmk6iuC29W$B(R z`)fYTaRu<=@0mncJq`|i(%teiP*$&WML^BEMD$!Td^ZU7m!oOo7?p{tth)h-;c1rZ zQ~cjXpdcUg@MuD@1~M==ApNE;jb zO$9`H?PwD6GR z5CGvbrcb#7(7Z@LK}K;UX3g$*#4qD*7f+i4C;j)%4N zfC>CUfCYu$3QT|rFaahY3jr1sSuk=TOn?b60VcqL!tVhlzyz286Oe^~Hw5LrCF@4J zd;w%JnhRk9On?b60c8{LhPWu309(fdm;e)C0pIu5IJN&0PXd<}8kVXcAFRu6|o{I=FXYp;REUMF5R{%teY!YY0uB4ZF8 z6N@ehaY#-~Kzw{0;^G>AVq#)oFf`iGyk36I1e8pG1x3ld!?vA9puD^s$B!S!(WALI zlyeNnjvYs7Q2}(uAQ(gApbd_N-4Fv!P#kpPHT|s%juvGoa5x}@$KFr|M}3+2T@I&s zt+!RcX)cAet{j&7Dj_!UNKH*aX7@Ctq@PyI z2F)irO&jpASoQ0qmAK#CJ(XdRjO1%*sHoo;@X@(Q0{p)bHK|Dr^u7iV8c;-%M_3Xh3#$ zHumj5fIa)Nk$dzoOrf#Rgr~zn;vs}XF9bwm5Ytovo@ffqRx5<1Iv=|F6R=h0z*=94 zw2U4Y(6<+Q_3DYNtSkft1qC=MZoV%Fu%P&Yx+3fBfAg!Wz3&T9Sy_o)yLJg7If(uH z52CcB5W&&i;0W&yePj>lqcZ$*1mdZ;fWulVMCK@*)w$5tSyGtyVM?(|U8-|$PVg%Bi1w=m7=Pb$Tw2Ki;F07^dps73v zYi$v_XZ6OI5rZ*U2n>0dabf~;5MV)(gCdt95hyJ!#rpLduyXY};mTVmMj^f6j1(dg z+Z$R_BvVod0cnb?EE}lY4@=2j#6(A9!npG=dh{sa#vHE@E7=S+5@115W3MU5X4((3 zPKd~gRcldPR0u=DKUbKLT6vHdu>x2)E@JOdK~FW6m4R z;?goOzS;%>7L+#N@U4~!&~(;@4IA;}Pis+-cLK)v0no+|hB2m>dm{2T%c9}l_XTLV zF?_WZ0wgqth0tt=wO9yEbfeIWA2(LKrhPt~m_XYESX|oX!*_onKru_UZ{Ln3%YGED zzXxE7?+j;KZ%Q@bUVx^w<0q$3)81g#*iUHpws;lX}}Mg%}weY z0Tz_b@$>uKxw*Mmykt2xY}^1Nx%S47gh7Z2i-_NMeNzKz^QE=F)>0HF6}T zO`U>_jEtsd_yrRn0xT%}J~)ej#bUww4I8j{*$Nco=fjXV0>-4#(r)#$9_9-HMZhk+ z#ViHuVJqB(E?v4{#tL_O5pZ=Q_E>uYr&$g zu~Y0JF(!*|no?d2X#;yB42+@eOJ%8fcOx!jw2ftgNvY zPXKhkSDHvW_N_&`;3!xLn9DKq(u;B7h2jef3yW9y`?3-i6kpQ!V%;<%S^e|R`1G^S z;V>lt$&+A+&hnxKTvZ?vu!*%6+p%SE)*QnXvu0w-lneN9F+YAIu%P&H=Tg&XM6yyW zxPP`_5j0`lpiP}Fg^c&}W^?ltM!;Ts6800{OL4|#Oq+^}FTM!D!QzBXPJTsz1;wwr z^HgJ8Lb71t7eHt?Xi}y^AD(>b0e&Snf#)9IE>7G|$BZeHgiipca}sD2nIB*i!7MKm z(_+(v(6B4=IkBnc0Tvh4bd~#tZrQR0AAbB9YAoXX^wb$Jia?H>n1GT9*s6}haeN6- zmyheNy$a*UjpL(`mDE8UwPpUiIdfT6I!aH0`Eqh{@aEg^p{TqL+HM!Y7}rmMRi ztcUml6HqV#YuN!<4=+O3#Aw`k+s#NxNl~!1Y+7psSWsFcqk`-8_4W9C(HHpP$CWUq zPKKfDc;OCgP=V!aA`>{9faHH%umRTGr5HYJFm9N0EyBaY&wh}vD3$;ViekH_b;F!a zrxb|m-S<9(BP0ce?lYkaj%_^y-|-&;?_Du%S^)V1(5YdTobTW)--9{V%*Mot6L`Tz zCGTby7nO9B%glm;0=)F{YbY$Mg+AkQaSktUG;y)v-~L6wMt;ag79lRwfqU-016{jz z^{)o{VQW}WRMM&BBmBjeU*YR-zJoD+I)YNi3(GYsv7XIj0v#t{F4zQX&NsMl$|TIb zVkYmJ>^Mq~=VU?gIO5J%uxHO6y!^`Rum&Z=&||hZ)IFl}g>nf@Kt%+cwtCod7eiCA z7kAxxD+Uc3q$2AB$5a*+1@|>Az`y(6d~Dpj4W_KC*po}aeZ{7Ehk&gj2bSzl&^x^TQ(?#yqY@)~zIe9>xVG;unVqY2UjV1w zDm=uNz)`#zx88gs&O2{3cbq~9u(&9sBV1|xy{G12IQT6vN$ zGXW+*1f)GJxyxZISc6}S>9AqLhA|i)5@12`A$(WW6oY=@=bz)d#ml926JwXbu3P+@ z2`~XS2-qrfVBYsW1`p_iJMXv^K|%aw%nfX^Kf;0{JJR+QSX5MmfBokLl-22A=s8Dh zT=b9gdfP{VpJoD`C*ZU-z;b8-bhStD!2S1fys*wAZFdPQDD6fjUxrPaHsjUT--I@C z3{2@$#m-nwm*cO0|0gP}5n>_!23|YyAZsp~3AjZ- zoIPYY{yj8B>+twvk0LcS)vav&kb4AJP~79x?x&6&JBFwK`VTnbMj<$3IUk z0-hsaDc&VUE}!Bz_x~D$26EUV*+|cyH)k%ZiflMf6|{EUI=m(}p9J^14u%Ad$9;+n zzhVN)Az-V?hk5twxMJo-xcH(O$}vpEmaw46+<6NYEX21|-Se?*A)jjA1gA 
z%0(~%Cg3dsv_rDtz(nhOu%7p8WG%K0>ks9mj&w0cfr)6%`eD;>mfa59tL{k6G;6 zh5i5esoMn|D1oyu0Vbey0@ji} zV)M$!`2C~5MOIdpQqBG64GXp?(iecmrJZk?!oosHSde@Hj9tgJGdDlZ1ek!D2w2Oq z#bNjF;h_iaMgRW&)nvL)Y-K@d2lnH~kK?JQ{|0T^B`_wAXh%+dlnF2awGyya=EA(^ z4cv409T?6B-M86|yt}7OJg08b$b{moXuDm^VsZ)#zhVM@PrwkF22;P=@xNEz0G(CA zsdWN8LTQ~%(;Zqr`M)Qgf+l?y4DlRdr3nnbU;+U|z*a3nxa@ib4?lDt`u6P`KrMH_ zsXRhC2dm=ZVo6MpGK0nB9IW^@6YysO`tW2iQn?M!{rkT-aNvMHH?@_vrQfstb@zP% zSX^4cNSA5<$&*h#1834C7?Vb}@g(130!$zf2%2c2?QDeV{9)Nv#ycWSM&1n0x&k2|^E=HL%Nm{h0bCd!($tEsphq!dfBw)(C z0>_HYc>RsHy;!UAs$xMAeX@1yR(!JH3*mcyCp7vX<@FL!7t123t+R^q*x1@#Bngh_4BVD=GHI)Cg3vy^hMQ_bv6F^um6Bf?+c`) zGI5cwYuhyA_`=`U(`e!yl7h3knS_|MTCM;EWjpV{9LP z>_2W36JP==BA^Y5fl)-6q_c(_8X8n&xqGJiwV*8j{s$Z_s6@M8$+q1`) z3AeE?K+A0SDiiQ20g5>oBm&vK_rZL>mXfC;FH0ELRm7SmvxH*Z#x*=O16 z7lJ~;Az%F8tI(%SgVq#zRwMZW6JP>NK*K&qicf{#uu&X_ zBK*A0-C};i^8_ybhqi!y0X)x{Yw&9VrXI7fZrw&?XJ@OjovIg?x8MB$`lK{rmT0@BVC<(x$47JKM_ym;e)Kk$@>_ z6e=4GST3gZ)s|E%D0aIY|9kZf7O6^X0>;6BJVGpMPE}V%RJDq%B`|D%Z^fn1Fu}Fo`{P zM~@d^=gys~Xprgz<=qcHh9Pw_NWD2R0Vco%{Fwl4uh*wtjCbDu2y_m*(vzwL<-mah zIFfS=rql^a=gRgm0Vco%WFcS_PDs^eJyxyyNtW_FS%|6x<^2yog&}P^NVPdJ0WT9c zkb6SRWv64tlu5I_Ts2q61b!hP+=~rjXZ=UQLCI>hDjJJw#D(S)PZn3gn8Y#c75$E4 ztO~TEU(zN5gWz?n1DA37}77o7hipihK2@jRM+l06fY;|xqd zodouLvwuDN0;sd^yl1s8ED8FkOe|fx%zKr#qb5a*3yoAh{$wHasTa1RN&F}iU;<3Q ze+lTs=ixWsE>`5ailPN&`}Xaq5o=M#xc>h8?%qdg8l~85Hbpg7v4V2w&>`d(6vCJ|R8hd#7$(33 zn1HVd&?rS4nuMR%tnsxer`JD!-kiCLRh30weG7fp3D9VCr(4Z`F##qZ3xQ*|+5eAy z0c1hx^Fp-Alkmlt-zq8?m?Ficu&@w&cJGF%>lmMR8XLd_m;e({A^}5eZ`7Kd*t2Jk zV$4&dpnUV)QW1Y;v^Wpgq!>_a2oqoeOh9P_w8<01rj_p$W0v9srLL|{9E`FWh7=Ax zteAHR8^Q#bfFcPP6NlsQksRdZ=C`ABbBbWdaU;;`dKq1-< z$z!m1sWM`sYK^vlcYd zPu1`r)UaR{PvuMG4Bci8*0d)~DL}j41!H&a+ z5Bt<`#R$quamcbUVIaE=`}FO=l`{b*pjHCfgb`T&!wR38uLwb*h52R6e}FdeJfFhg z%9#KYP&&Gs_^Cg39i(n5Vy zchJF8zDkM^6lq~8ew42maJ@``2`~ZwAdoN;OPBj@3e4w%Lg!!Z+_7DRA0Fl(9m=g> z0!)Aj_=tcpt{?KnZu;WlVxL&xb3xg*Z5x7Odq8Ul^$7s3lnF2aCg3jwq#gBLhJhTO ze35)EC@WU2fisp5NAX4HbDd0p33P%$OwpS^E8G`AFT`clPrf-^#i!yzAp{N_*e7;> z4(LP=b50B0bSf+tX{Lh>rM2Tpw!gV;LxFi zV!Erp*Bi|hG65#Q1pJwRAtDX6we?;obK}y>_FR{~FY3@>~EDU;<2l37kzpA2$eV*Ka!e zh6k_sL{O|&E3(PmE3S_Ro5n>m0Vco%0+)b3Djk(o)hH}1^cn=82nq%EGDW38qYw64 zL%AX*zyz2;U=h$jAKM>WwrurUvwb2co40I-GorWG8p#zg0Vco%0+E0=x;Hj$+2OUO z`$SN-Zrd)Dv#-}0$rUjHCcp#&k$@qlCypFG2#dwyM)Q~5c=C`Nb=ywQR`N^fUKf;&o3{f|?CIr|-sMV|022s!0*07A*t*?2Ybst96mlIt zlzjlkm|g+j$qaxAFaaju4FdY`6x3ALptQ8qn5H;gM_0!)Aj zFabe8EH9YidScI>Js!v5wxA4KJnbj>d;uV0+8=}_qKC&D$Q3XFCcp%kz$pTb@Sb7{ zY}cs^9(v`rxX4lej$QkJhzvQ3;xd>36JP>Nz)b>%=pNXA;DDP?cl(#mL;@nJt2{3_<6L5y5OM3%5UP9YBy&@>NxyRrL=BUGM zcLm>N0!)Ajc$R=hOnV(Xl;g2Vydo&sM{2PE3Fa zFaajuTLSvIxM`XJdivtE{Ai(Xoc5OJkWJqBokl)OrZS)=<_%) zPo<-k!oriheZKvD&N-L>6JP=!A)pJ1M{P|N8X6j8FW;jh6odG=`gJNVioC{642`~XBzy#zbKwh{N9rc%J1r9H zvm5Z?&o#a@nd@c(enCJV6prfZYFXRjF+m|i4PvunASa6$8NB{O)oBQVjanfzT8!@# zj2VMNP$z_C)!zEkp5Z^3025#Wo+hBthpJ0Z^u`cRGf{N;b_Z}E-`b*$J98~~{PJjw z?H$B|(jpxCkZ(9A*iX5ceL-(5sJnb*dvs--VBA6c2tPD<`Q}4va!wtw4YYb;5T2mO9W=%siAT7S#RtpiXttnkNFc>urPOLlF7<4W&L<^d_>Jq7gL9db4Yu1Z8<2D(sj4T|P1tlZ3F*Vtlb!3z{}4L=vy&&+uy|zyvx+ zfJQ47mGZ8qVE(*0b3Hm_aq2=l*KC#NrcY%0)NP)~eJNhJI!zUyl%yixH&coW#3wr` z3aQ7pTg5Oze62U=G?LO>B0kBF6xwjO&?>ngjTFw1A~ESM(Gp&ysRLh0`8A( zcOA3fs`DCcb*W00a2F%{X=p)1BiE3rU#d}R4$a{O8#TCPVgzXM=%r=~k}2}uePIM{ zn;ed37L_**caoxXI4G3s1 zOei1CNqx4yM$C1^U`($dNl?fanT81@C@wLfXLk!>pt-8>V4(!E!a@oG#^*n;J}xgYH{E6mRYt|7GGxqOrUiF8Zk_%l6jco zF+nLOL-m3Et&%>B=dW%YDFllT-tJZ5c(Ja=ZwK53Bzg`R0HC?`tW z-c&;Es1hR=64vJFh*(T%E}qTr@oOf)1lmu47QL&A<(>EPn4r|w)3Gn8ohh*FrC-WihKIISdI_~n1}A@33(nu@x3 zXsF~e79s^}B#ei$aobEZ1Ek+Y0CN%9u8$?J*XoQg<7J9zf 
zlVO6nHj-p^0#`fC=!MKRpu_^gOw*^Bm~SB#v>Mz^jx-^!OIrT!`(#fCsg%E~ntX94 zEyGL{4Fh_y1&H-_sH5pDGY)9c>z}JiK;fpIT3yWvCK{$Q1O87JU< z2bY!pzb6tGM^O@G@w|V@?~CR?57qlw#FD*7M94RK546obz;~=+eBcG$>xmn)xK(8MwviBM}&<`YwTA+$mv(rFD6-;z=-(Kz$@!4nodqMf$<55)4Z%DO> zghw&YpS!TIJ1hrIU|vc|`)4gr`&n{NtQd>%Iq92Ssx{Jz2cqRv3>rk_V$tHW{jqh1 z&a#*TcOk%#rvu$GKoHKG{&GOtsfpUK&x}R}>)=<&X%o@&36lF${QY|@_I}PwC!=it z*K|Bve;3_7h?^W=?axV=lNoeRz1pAmJB4Z)yJs0b&m`cbKm4%Ol5_LtyQ;s;aqCJ^2EaPoGpR@v43VfDYZ(TNS+brlvqn6V_(-8yyo<=|4gt^1t>Scp$U&{c_G5 z9mn!&FPekjhOR!r;#9(_d0@wW?8vR&s^}TWff%q+ij61M-1o`vtf);?bq-;o!tpLt zda*b4fa$|$+TAl(k+#rPCogh6O_-BfH}8t_+A=0XRI&v7+k8-DU=I2_qw>w>dt@{ZeDu zPmgXbs@ohNNkwEON3kRk$S-CCv0b3rx5{OZT{_u4Wxx%Ql(C zZ~Z8}4t$POs@Ql^d0+^N+jcGv1joeWWX*rTGTa9RQ<*Y)Zx8H)6)oSkf1T_P)vM;D zV$9dGD^w!l6F!V+(8@@d|aNFmJ^U3Wm+7<$r4vkNDc@AVBqFwa} zUf8Aec?2<}`U>*J7jDSy9L$n+?3JceGNLfsJCnB10edxlsD|X(|!pC zx5hL41?(>%pl=A!PpLFnw`Ztmv+1=A^LLUbEz!wOKhjY|5MM{{kcgq3pTUvL_T+U0 z(R(mmkmx%ihx8)Cd(l;3d!O)mKCBc`++B;Yym>=#AJ}n#_@&o}QW%C_*wE3Kn(y7o zPm7@T3e~Dx1ZW87L1)l_X&q$BD`3j5)DskEA;2? zKW{f4Ac`NhPPE78Ei2F~*%l60rEMNS~cFKl_5W{%g;Bo(HHQWLI5yE+59|}|% z8i$8)Xz4xo5VYJPVW0z1IuKrm+jHK-9Xx@`hvoidON(;Q=7OF%;3Ze+HN|{<*f$CZ z3rjBLXj&Kr%_ITO6G|oFQ#fpFY&vg!N2L3Gx6o{C#Wdf~oTbC=Ji!TrA)dcKZzcrmiE94r;D{I&7E#8Ky{*wz)@=+u`H?{k z&H$DBOzt^QgKlHLB_I}8gN+sf5+zlt;$;$Clz%kFtTtBH@_GXXkQ_oxu!in-^2Db;pjM7&n_8=Owgj=hU*-0ta({F3-{P zp^Dy66!P2aYwEsSo9J|bY}726Oj1L_+&JIUqLCXqQf!a))T`PfmXO)fHLE|_25mRm z;j-#LH}t?ukOX>s)!Nl$Tyi={UfybSXmO}u}wF@cFq9-yTF$EWRjp>js3M}VNb6boK6}FgbKdNNl@fY$7qEMQ;j)PSq%%;v~@$(phBz* zKgIFIoMO2prPR9fz@d3M#7dxtA!wH{rlmqSMy(wyyg>92hEzrq{spVQ5k$K}! zE;~9`E^aC(`oV(wK3KWEWB9}tBzJ{EI+m7VG=F|g?56v#`)IL^*73a`aKN{(o&8~IxKta&I2E#hhNwWEyBf)W5vxX(9Oe9dNrGvdv>I0I{ zQugdedyM8x_$Cz%$5N?&gJCIidN_pSk6luIGA_=m!?9EXM5F}GJ|AwXy11Xk7r`~} z2~^1C)Pa&ySy}DJCx4*5$oFiS926{PSfHuWJuC+)k2}|s=*}|;+NL0I?O1SumsLmi zqU)=~vAkfIVpzmo>|nR7$i>>jfAcx6f9A40v%k_ zlY~GJ9cfE1@T5!tM)3?810&Njpkk%@|1EMw{(99ybd>6~Z)4AP-Di8gKhU77P??af z>zjia6{58L<#=wJ^%rMJDlgA_e8ta^nCx(H-L)EhVqHfacojvY8C_?3}C{JvlYt3pU{&EULa_(2k+N zZNJ5Y{Oxm`$ZIJo!pc-6lm!JaA9Ux z3#%}J4-JEEATn&@Xc*-=ol2Wv*2SX4`i>}Ih5tLLlBrE6r8(60-4Rc$SWe0aB+7K= z1eswp9RhE|91XjVeS{Cw)&w~Y$3mc?%|Q&trenfhYscdj%?YS=7m!e)q(NVE%};H| zcH1lX@k|9va7_eq@kGGyf}{NeW?c9j&EMbuUon6J$OhnI>*FNqCWUHvFdo@(JmwEw z#UgqgUzlHEz^;9Xt6hJx@g6cEA#0@`vTE$PZHiu{$Re0{dwmRJ%ffZFryo3!k6EYgl5N7N~!%Qh1csOh%m$y~Szk#p!~a6qDe zRO9pylCXsHykq9t z62nutBn4d>9^W1!ylGsH%80&_S;KSBwgt!^)HY6QG=6nN_Mf3$%ktGPq3k?qNk!n2 zeZQCDrrc@>>wNcyowNItgsShwaFedG$A|EQ((L;hEJ16*v)1@?GV>2ayE~|JmJpUH z3;Hmj4fZaSH)RB4Bq1SH2`+E@Rvc1~y1j)pNyAnovxRWd}$HFPY)1{|<$fsn&-s6KQ zP&wj5b>=>&6&Mp{{|(G6{EEAI-)6;!lPtX}^w6$(@YiR?^^(i{n zfMMr1phW~HSK>z%XB@BCH`Vz+0R9phDS!1yLlpG0UJr;L&k#~;khuaIgE$1GfPwLX zh;^=>{8uIJ{aKwwP~TN?@6=7uGOS48Ukn3E;UJRQU`>vYs!jrVU$nQ;l|dW<%#<(1 zh~VL+RIozj2`8v3=)jdBw^)+1oWg4|%Ab)gqj?m)U{Ez_GC`;uO7im%CK|>6F*{V$ z+;X0PxZjA4_~f_Kb1w4J zZe4B0$E&z#57^z%hRWkhN@GtV75TIMi7k>W(EwmnAI$y+xtx{*+h zWd_WA_#R^<40#`o;Ej0(m-G*urq};w*ABXIVBN%lVaPu+l-P4-T}GeY5GOtoso9h* zvE=`+{++zAh?`dz5ta7fr?kkm@|sTwB-G_PFVnCqG5WAu3$4&TquU(U|F`ek!5bHVhGW?Z)%4pktAA~3s1VT+RM zW~p|M*7>#MV<-)xawS2vj!58pidT4=3=765GcOfe0)C7JE|N}IPRvyx5pe?wPQp;` zN1(9@q(%VJ_YxQp=q!N!g!P}#XiMLW(IGASN>%%_MUilL?&&BIMefO}bkAz#S=m@l z$WQoG_srK-d*-dSHLYD(@%tb&+J?^8!5jFX^ovwXRKQy{go+&l3HFzuPh+b)9R)d9ARl-+(y#4p zj4?o1I0CVlF-4hS2U2!<1TTn-m35Tt_2bQU>M#YsyI?jo7-Q7q!el7Xqq+{Lu3_##u0qZd! zhxsf<3UarEhG@D{lz6aRz-I}v-%Y}kK1j=iUadeUrmXH-Kn9v_5n?!A5#mwjQwrng z)`xUG;db80a-P%hUkiQM>Yh~N^ohxpeC2tevm>URmpj$-+$aUr*P`Fm(pCc7IWRBE zpR^`+M-K59f!VSwaSEv_VUUht}N}VYf^F#h! 
zORBD8R{028bLu*i)MT?j`tM8O`2nd>UZ&@Vqci3v-UX(z!zmmVpbCy6;rmt_t`1y! zkf7OrMl&BCrKimC zhI5Oq*&<28IF3Qj1?W)N{;rPzowq!W!Goece&h!HCx0ETW7``ba1|A*{l~JP*LoyAz z_-cB1dH-qvA&jdV3U;LXR(Sk`bNirf?Dt(}n$DfbvH!}mdM|)YJJr&6-3X{EW`$W^ z_}{xgF8SmhFLO6JxPC2IU@*7!my!s7OC5o|o@aHdWNtue(B4`Mo6?uk$^2~5GB)Dgv2@a#anhOMSq(DB${v>VLRH=nyld_ygkPeFXB zUi;A0><;gs!J5m1O`RUu)Hd3)M;G-&u>EbMo?eq0iQOE>^jsW*!YM)eMrEOZZ)Wx@ zysFVKZiP7dLe(FmMHAYHUZF@$h;t4!c=v*_8_qJcZa z*$oi01w3aI6qJc8yOH00GE5Ql2{XrN1P+yUw1m+G$bpixr#|}p8ozAY3X@WcY3&K$ zymykH{%+QUD@QL=j1d=|OZwu!i#TK=a7PMRx`Oy!NiTb$9^IqYbIbXk0CxV$M=I73 ze4|JylYQRnRlmXA0?N2$KZ4O#4AXv;{wREPP1$8HgR@ zYvoRHIZ2OWeipFct67&&Dl%qboU0VxA4P(kJX1+c&}fo4e`rq=UnuafBk}HS!H>{m zNo$K_7iRcNw;#8yA`)Xgx9XH%>S2a$y51C8^sJiH>*YwP;7O^(bORPV5!VMKFs8Io zQ8%`CW|Je*m&mjiY|{9PS5;;9jSGL|H=7ThOLKOD5g$;RdSk}7l*KpvQdJYU<%1^q z*R)A#%o5fvS6u|=HCJ5oOFV|uq_)5}<4FnF7W5bIWgAaW9|)ETpAV7Wqlb9Z2fP;yn^ zpsO^MWrf%3;3&@J?8bjlRO}8P zwtwirL*mY(3Q;x^>Ox~e9>AU9Mzpd-Hksy*tcAH49Pc@H(v^z-f-773_dhF2Eg6pb zcx8_zaW-V@xo))TD*NpTdh^d=$Iy8RGQCn@icy^H91`ju83c6brV^iT2%%6pMyuzN zHa0AvfhgS9*9VBE=mDPkT-)f)PnZe2DZNS#!3oP`3tCKcdQJ9bAWlDf?g2YI(`JUO zf+8XWv@mj!Tyw<2_38qQ{6j4Rs9f7ui(ZKJjndy*)~-hz%B4MuofID}(WZ?(e^DN! zN8bn$)D+tPG`x&%3T5_k?)~iBF#F-Q=(5M+Di@WEszUI$%quCi{80Q$8&8h~i9J(= zRa+l&1IwsEqOt0Vz`_wcg;UK%_ZJwW7%exSTsQ*GMn9R*P_ZCl$I@gF=e|dUTEBR? zdMWXi{)0IUH#*nC_HAdr%Sw}>vi*7e z>DOR858E@ajE1jleK(oC`sCq0-f&byTT@$?(W{a!q2dbNvMX9U?R&k%?rK1jEl=i5 zx;3XJZu+0oGS>|W@;PoIUhlXT7QXeUp0Dh7GB;nK)|oc`;h5@-EqsRQ`PZa*&)U&P zadlH@5NOGcoNW!aT>jD<9BT}+ir`POk5iuwoao-(zxJH8Oag9cxKwZeQ*}xi=R#m) z4Z5SiDNF#ahE}in5yGJJC~X%RWxkvEz}GiQ+#p!m1v%m`x-^aP@VSm5e@Tjy)-ejR zg1>?_xEA3p_`s|<8e`DJ6{R*+NUc8j*YWbmjy7TH58Hj$^>EY=;x}Edk%`8j+)ZfA zX}?DQs{7`JYeOuDOy7Ga60T4;$uWVdV9D=_=#}jU)7f!Hh4LBF@@ne9m3j(h^-{+z z-ec*T)u+sMQ)klWy$D}ny8&R}^KMv7WkbU;U}dOyk@OcVbuDrc-;TnmSfz;FEL+s>;P|5(yj$c4+TQPJZ zj^=B=FZDq!gUxPjX+g^JQ#zI3H<+|FsOI!hktN^H!msfA9M>c=P4MgoN8yfItO(^F z)(AN5#XK}7N?RA8kF3;u8n^E1wzztaE9gWc&WTNJ-H++{-a4JWVG_Z6Uf*&ZO6bt_ z=(2K$52@C<;{Ebc1$I2R@u&Qdsh)Ml+GK0_Ubwp4oI^MFksqnA33Z;pQVv%2OQQV>g)XX#~b7_NSc~R z$qxA3?_$iW1dY_%pF==tG%u+miDY^`y@Yz0%~vE1RoqI3_unzn2a|e?WtNzEn>6FN zggi?v#Ie!H!wC7m%g+8g?Hk`WP91-0Xq}!7b4)KT`u6;x3Vbt^TTJ((c67n>_%k}| ze0{Vut4wSK+AaQv#NEr)>RsklFRbF{e1N+?tkl+$4o6NoiO5*liCt=b*E18t?Y@;0 zpZodS@ru+g4(LGk6ogs=>)w26?s)45@TH&oHRqncRvWxzZuUx!iSsI=NRvlKlpVjlMwQbi!r0VpmR}~K@V&3{4H1Pp z`?A6Np5k5phKITs%)W2))i&PXZWS#cSVp*ir$we@`GcvlmKj{ zeg{^iGRrvDv$t&GfUKw)s9?qTL?ANJ)qIFd|D&mCyF7NU5jM>~^*>zz5}igXp~^{q z0V`4OE%UgnKYdnB4HUb`3FG1^o@OnU(VX zC7vZjyb>o1E6}Q3AIa|z=;MkvBOQW2T9iiarXU;oiceJ&KKj1U7nWSVbgz;sFd_84 zn0z-gJNs;Hi-iP-;e%!nF4|#yt<1;~ES1w+lGAqTon7#aD@*Bx?CnMHqHpzl>+GpO zGO=XPjg7GxJlw{rAUF+Ve>@S_Yg*EjInx8jplwka@{!BW z+vPk|*N+3=9p7Ho;7h5rmeJ+(MB?LYVC8rSWjXd@&a503tU2;GgQ5}^@%=# z-;Otu3--fJhi9g{8(JIo|Lw!9m%5Gv#}$GV$D^Su_h=)9n_V9nC)`+QZt|HDI5Z(C zMVuZnn^P>Uvx6E63h1CiNDFFGvzf0d@3`NZ#IMeJAhWkfoGr5sy0|pfy0g8Zp}T*e zN)?ggvLcqxrX>{B6P;izi4&78 zi?*g062`cKVOH;_2s(Bs7)`8Wk>3RK^5W9YW=S=U7;E>xRvbPEFJ|Xb2l?Xr<92GV z9g1Iw35d8z-RNTcqAr4IqX3&iB=-y`$@Y#EI%PAOeG$ z!G4AiLh%S^-}GUPw0w6yyYti!@-aK9BBERD5hbxiW(sD>g6?t7G&WwEwi!rA)GQpn z6OI{0=v8`gaY$jZcIz`ZM$z3|=ih(dQf))^g!n(j?@Pif!BUj^m^Ko?~Wxi0R89PifD0F_qH`Eu^badnut3qJgI-@)VVK$ z#YNFXV)*zGe0P!14>yaWC7d!ICLvx*}-of;VWw!37JgS zWStuwLg2X~oz=nY3owpGyjpa@)H3v<1K!mK7u(Xr2{rZxRk2gl^*M zDL6$_i>qmq>xp-zaI5oVO-C^h2WKMm$bAm&!z?hU7rxZoH70S-&9%E-T?NDVrK)@K z($pfp68*?M9XY<_XQa`bOcrnbS6KhQdAb%jA9NxjS@U%k9@cgnNmi<}#P4tZ9I|Q~ z^iL@)SVnx8x$>kywUf{Rv+-hd zsr-L)!?ex^IJyp#mGa4ufa6E)li3xybltVk5|dlN|LWV&E4DhjTY1U`6XQ-mnAJnJ 
zY1^3d$FnBs_2~u?k2PK^JE1=U8aaK->U&7Ps^^x}pj>S4pV4YJRNu3gBljtrixCM9 zON@kq`Uf6SN(9BodV6q+ZL}Ovyy&{raq=>&KVqM1*@6P(rWllz4KY9ST8F$BRb=@@ zWi~6&B9q2MUCQX+meQO?Ii9G%x@TgF1((pd9LUdu$exkNU6PYzWh}ih0Sn%?f=|#ctjYo}3#Y*Sh>&`IfI=`yQPZ!@$sR_lR;k5m%b3%TU>yw@Fj02*ON<1 zj`l71=ylxTRYJ-U!{`PujVsVDVloItA^z{4BV$ZWOQXAJpYrf@2oRB zw&Qu&FnbN;@w|X@_lcWo$Xztwa<1((eYW?R%taQW98$TzlQ{M!zVw)|-btI!YUu!- zk|

#>5Po4PUQX9c@)RQ<|gUtf>3DG67~!yOwUNaO^&;6Y0*yVigM8k3I&lB?{~H zdL_&?g&H5H78%5u(gRw`sZ2lT@>ts;vReo5epfTFh^W}uY|s4sHAW*IFB)xZXE+5P zL(`t;rF^rsFTL7lhPd4k%3jUaH6?M5{Lc9$mEeHR@TNe@IAZ)WN4URMiJTxtD@6Y~ zzc@r4gq>lD8KAlx!)CM(n*OEE6T(eWJ|B#-anrfRYvrh!^Ur>!zQ{ST#L zTUJmM6m%?V_-HXqUpOQ#!I4|hz$R6hTQO`_LEM};&7Z^FU_Yr4rO0MSkmdcqGDeZ9 zv~aUn4-RvMZC(ZAxE*eqJ)wEgz$I^fYN}`D@h=~H*S+)G7}9wrQ(kWFliU3&m&ZaG zApKRcWXG+r>Kr%>-?Yel{fJ3>zAa1chn374$clUW#dXy57g4fpSmmYWdD9TR1eu%B*+FVjDjIySjLA}M3LVUrQu40BIlJK9EBn(WY{=slVOhqj+ zXCqy7>@R%J%{{NhuYKu!@1f;8EnHnw(|K6Q zKWOFncw%GG9@3YBRBQ-0oFcnNLc7~?=s@yWCa4lix``{Ljbu86XJO%Sh1l++P#Z=G zK4h+wzE5Y>Er`8Btsw~+LTFnW+Lu9_VGUpEUdDHwNl!OlS=dJ6B!J=g8grpD&8|(6 zLg=q<$OflFbfG8`Bvnx+8Bsx+Ib-XHc+AEfshyqgi$a2w*#u$GtGiF^8t)}l@V2BE zy~a;lg3QlcA)C91Pgy$JwET9dn-8%Sgo>ZIdt@CfEo~(Gn7Q>XlV33li-EiSs!lyiX0rEwZZns;F7XJ zwH&*%njYA19GE$ZnzE0bxT1s*%0hEpDPgL(VhhC#&J2`eYWd&{R?sZ z3zg?mJwPDU#knUB;qYg%vJY@lQ@j}Y`vN1}>$LBMQ;R<68N#+L`OlaZt1>0C8^~H&F+B^^ z3=R$Lg`^#gavCo$By48~zIdX$8JdM7lQ%z74ZDcD7z^xi^)wBgt74UEr0C>QO>p_- zSGTL2fWD2+Wrc=ZruSRXd2{Sz7H@gW;w07}xJ8~`J7i<>Qp7_)K`$YilXh&u9`Gnp z9twQ-S&Z2wYcbOGbG2Ac9LB&P3%kT({mm#-X#8&a(FL`iHqTQpYm~H$avG6G#%9qI zCDXdE3Itz?zKxSPv9&K+#=Y+n__R>kmvKePk4yl?>e)(QYDzT(E7Pf;$oJ63=$SjI zuC7jc;-o5@z=Yei0O{`0&&;a!7t+U(ap9NsllxS`%^_gbX-~qB@pi*@PUZ8oJq^QZ zul@deBJ!b^dC76lc~d^AhebA9^D&3!VK;OzT)`OLoa;pE>a7cq$@M)OWYBjR<-x

{m&V~(>%p7xz@#W(DM;`m_+MemYhTes5xl9zNchQLMnRQ! zB~rih_M*S3Vquil^5_AkzZf~kKB=NSM5aW%QHk^8VP1~&xL_0yTS8cFH#c20)!>T- z+Qe`q;W3)I7vKspYjR=7T1`Go=Knn%;I4~@1ih^k99hX}hV#M9ZMmQBqCB05C49-|QJUq|-TLA5IS@zmqb0zoN9OXm< zu(3GUh=8QQ2UK>mIHw zwvuty&aRmu*bt-bQi1A```vGQ2c%ur<;>9G9Y4&--dC!U^~`dV3e*v*JTH8+29 z#GgosCA3WmP^)sG+FSUtDXkLX|5lVzR^cXutOl(~9G%xeIS&UGzy;oYsQJ`06YwRt z0BAZUaU`fP z1a}dxN-m7>a!!NwYDo5FNKuGok2tqUR#a;ir9v+3{4w>o3 z48DbHLr9FXmiX+@J0Xlj^GTYA+Q9FnyqJH<_v0XCQfi;fjFy8Sf3JkPJLKEVz*(j^ z9O9s$Ej>E9MrmukE72U>fB`EWJ1%*ao#%U&l&DE>RW$O0Mw1?==MJPP?IvV!mj<3@ zY$rRgZ|u+2UCUY+2`EV?Br(@6y;}}yhrt*!9v&Xh@co4=A|)kITKeCj&_EFwVy^gW z9aCt5EhY>AohYJLVZb0L-$T`-b89%|9CVn@7hT5Pz*OX|tn!C`Z}llzr0 z(iTHb^uqauFuJ9jE+^;<2-kN*u@~8_KhTk!VL2^|3X6*lMN7!u zUeyHc|Eqwe8hChnl&WBtXj=a0CFCqq?Wj})4e03TEV@oBya8mP_H#vFQHmFao>V}Q z;z-#GWvg4&JTfG$$JFJ}&Bw0-IpwI^=KQNFUJ3vIV2WW*h3ofeoBb?wTFX#;kIJh8 z;!AyEH=DSPb_@NZd@E4{(*mVUHQG_2f)56A4358aEZl}t-}RaY-{s&5-*^nRwp(e@w2*;l3j~ndK=L?(5`k6gj-Z`h9!5NKCqPNp@JC*!t>row46k@j7QgwjU!yfragD~ei18y zf@y3_yFZz}(rb9ft!v5T?b-5WtU_QSD5n6PK5^GYIS!-s7XjAz1q5D_C-T)oPVh7W zjkkoKGUwHB;{e!~&^Pa%be|!Ff#v-=1!0|uMX0Ci7b9NpoX;`ojP6=is4t+x*;*?mJ&i9y{9i5wjJhYiVzUv1qf( z=rO~S`K^rZOH=3YSJXBW$k{c{`Tqkg4BoOLe0)m$U?8yg$Rb@tJG+8LL^N>4T3S>T z`aCBFEN^l^xOMy8{Ctf(++GTwbi-!x{+Yyrc^iWxd|#Mbta~;!op5Csh_0b*0Iwv5 z`Q&BD_1Cs}U60e98}G8lFjgdkAiem*sBH72>fmfuXvRnAP!YIw##+4&MHaPRRw+B_ zl<9Hb6MVWbt##p;B%Pbl+xLSK3Tv*k+1q5QDl}21(g_U{luW{4ecI-K_$1Cr7a76z z1QO`RaMI-7N&hhnPUD4Ua#v6saK)pnW0#UU=w3d-A^g=?+WleAZd*n3TVPzpZ~BO1 zv`;oLw|`J%YcWMJV$+fVR_r|X%KJ{PPSv%X3NxzcBIO9}XT zpjmQ%%j z_RzAiRIhGf(#6RiHQdS-GH(0GrU@*Y{FlokjgCBQ5tNc#2$`!oEDVvigR1{9;^7ld z;~7s+8&Pf^NQXzDm2{`aMxM>NdohTlI2hqAw|nJ6m8?<#mBSRhrqt4Rg15+YdRr1T zpLTWzFLoWey0=f8;}1vC$)M8+^NT$&_5~zgYNF~j*JYRSq5$Uy!la7^t{fa>Pp`&8 zYNS{Zn9|hE+Z@T`$6OD}li%byxa=fsr~H+#Ep&K%tCvwij;2UMqj@;5IfoS-DhS^E zwJO_oOUEl_a9qH>HlT-ALL4rzxf=sg8|T(zkYb` zOX6@kkIkpP_YB^|OFc7HD2ASIYfF*$a86E)D*hi)UlmYSlmr>v-66QUTOhc5aF^i0 z-Q9vqaCbs*hu|LE!b2YJ?iy?_vopKDeDUt-)2F+-x=I0b>>wl*w2eZDjlH%nb?R&T>M}b;IK6(`mOzT|KtfdRCduh%IWPmZ0@UPsvX5 zVNDXkS{i}3`g(s~NQ zB*#|PLs-Fw#0iT~>}kOY7gC>CkhS4Z^JP1yBqLE3Ge-BV1-w{874bf0x`m#F(ejga zJi{J&ZrM0(p(gD@JMV_eV@}8pU>|SApKQm!LQ?8X!f9)3d%jBR97vo(a226}>7i)vAuCBx#DzlRSCIr#?!%$?dRLgW( z&Bjzfh%w6ZVNDX6P+))%bY$z!UY>~kTAhG^T#4#C<@V-iKDwg%_piC3RbYkuot(_O z>WT$$&`ukj;;AE`q++mR@DVt_6}S>J=}~x{@@v<9T+4*I-WQolqt@ZCwUY*$hR}OH zy8Wz3e10J~m^=St$oW)#q6;B}SL}0F!>+Fjg9HN7J*t5>`Ns%lxq=SV9ihvm(%PXk z@XO7lQj!d@(!W$_Hf$B$O#97H|xnm{ONayh6#($SUBg!+z4UFczfa(j!&COKkn=x*1H(*;J5B@XkSxz zTovwAX%#eL)2+U-!`5DKraRT`ymt2`BgH=hi}D2>hALmW-~42ua4YB0WhT#9FF!4w zC#@O;Erbz4M~_OhPuwcdTfzHSNB+KS<+_>`&>R*0gWY^2T{k}^B@fbWp=PLS>X4rG zVzy$b?75tzYr+t9$c&KThR6@o+FbU9g5qE)xqo<*I)X6Nqwxxeorj)Is4Y78MVvjv!G|5*=EGWUBtyB z;YjpUzw%k23r((W(6$I!Pt$^7vfX%87FbhzLVj{_)pJMg2k7k zWLZ+aO{?-^2UJ@g(C zCKagfak9bQtHEIcQ&`0{b}RCsx&Rs&T<}Wg)FWszf=WSL)QQFU^tEvP#9ogH#W0%F^b1=&{rBiN-;hXNbq58d5;toDga z`taTD#LidL>#eZ1&67HgXQQxd&2~hVjO=J8BY7VSZgfq}qT)Sgp$57T0okP(Gb_Iq- zjb~_sFp4HJjXw-35f1Wb4}TzODAu;2p6F(8Art%1eSe%0+}Yz8$S<&(v;1Jp@`Mch zp?D$wRBwSC35mVlBA?oZ&4Ai?OazHp6s@M`%OhEH>SH=f#c}KR)Iv!R#W;L;0F%^) zzC=9=LnZ0OGVX9^R$w8`7(E7BGm#g@jv&uBy!9KJkIwAJ< zJNx!VBNv}6Xn79DmX6AtR&4tp$`W*Z^*_ z#d!LR^@MtH@_YaLuJy-Q(~6}YfaxA$2` zh_4~u>{ZuM02&IBY?XOQosq6)T|fpAU0%%Xy80aT6*}xNX>3@;FteE4>}n*Ot}0Q} zxBA1tb~@6YSS=1R)KHF%8(Ga_)QTa5673>!V2>k8FY+@!0i7tH-vP5Y29DlL61P<^ z_4f_c(CNyPuT)knNnUTUi996ZR6y(ew^JP?v!;iFgMz83y-S5Whv@HMlhE-SS?UJ+ zW<2R(6>={5FC2^wBlWSnh*{o1ft6T&7ou30qlHn*-y#6-;usmleZB#dk*kvgox68# z-?xJ5w&z)WV7LlqXsgo4yp8^VHn1dAPZ#__VN!KOzrFv*1;|{LPGy8Dn~>$hC=gnc 
zq4NqHYY!AtW+A8!Np?aCt3>UX%yH{dr8nb(yu@#8Zgks=l}aSi6ce~BEBfX~tpP;sFk21TkW5$=>^ z0El_3NNP2X&_Maj|O9cjTSk>=*+XV)uBxfJd|;9j(Fk0ejsdY4aK+DST-@M z-oXtbyVWkWaJWe#D*QREX+|P0DtfgGS!=ydZWM!}_x!>i+N_g?G(}7C!tun6aof(j+>tDa}V3JMy`HqxFxB>wIqwAsc%{UM`Xms{6_*kuBM+u5l=r`tn;RSsE*COZ2d^~lSRW5TdqwC+%@ss2H^ z0R0)oPM-en0y&n`PP+q{uaK2JBqaOoqMzNLCIXx&pAD!GvN9qAES(W%Fvj6XX%&efQ@@M4pj?U(zR~2=Jc9 zQ_X9NCO_@$hr70iIw#?BC00kR3!$OF2F0pM8$DAR;ws{Dq~opTR2$ zQ9tl|DtQcDsRxx0Y)tPbhodaZ(;w9k@cPYuM-Cr@hY<|R@(~y*y+lfT3N)XAd|%c9 z7r?qyOHkd4%+e0}W?zx5|BLdrL#Hcn&y(>n_hWIgEvn{8Q!IKgXMM!gDMT8k;{)0! z!#5EGt(&%V9Vx^_E1zze{K}H-D-dyUagfATQ`;fl_YKxVxOt6QH>}WLP4P{UPleK~ zj$ULK(ma2~OhdTS!AMXMB1(pqq^KoWKdFn~p?x|7T?!gaBbI)?bIWAy2jsj>1ZV9YEqP}Bmjr>{~8kc~!x%2J3R$j8s zka5a2Go7sfs8cS^Hrwin`VMuJ%VZNa7L*h3wrH##xzo|B3Dp)4>i=aITx_)crs z681u+f2cLJgL8lNI^LRUxvse0lIyrKViH@AeW(j4qVS8)a_8#X&+_2OxkL=d)lJVSq&$z1rd_@l*EW@cu=$DJSV7FIR>w-tOJ ziGTi>7FGUyiw$TZ$pgWeb%eTls7f>N15*(mnQQ`OA2kWvT>G9jDOj;-`eR*vvX8lV z>D2FnoGXZfr#w56~myXiL^UZeDLY_*ANVGDY9A41eDu!o36l# z5<3V}_pmS+D?*Oc53BK;lPf-3eQ}3$M;HFErkG`l=q<8CH$h`V@R+imCr~55>_9xi z>4k~M5w{u_Z$e3p64WEf5+sJf^+3EiV~+g^ZAn8ie2Cxai-tjsK%CBr=_{w>G3hS= z-!sbAjr}JJvmJW0=un2+;ne-Wn4@+h#Lys9J6|EH~`Z&YBd(@ zY@uJexIJ$7YfMO=6T+k=X`n3$*aSz%=Q z>q8^9FH4<(F|u)s4uts!dh*Q0>qle#fDr;}m2^~ZJL=hE$#@BL6gQtKW?HnG7HxyOpf(pg+^YhU zXWOLAPyhy!s9$B`luftstx(hVFmz<4^Jq1J4v9!6txtn%&~Am8Kbm{$(5)XR1~%D0|bh#I~|zIkc0zV)zNo;R{Zz)MG#CYY(ts49IzXEt{BQLq2QWMg@k+4|gry ztJd4O<=$k@SSGR9h-E<4xu$~94W<5Bli9T0?mP+2m>UE|mLd{xQj>m2 zaKIx_=xPU+3UY0%=#Idz3po0H;W-}-+4~TjLUW&11fY30j_;(vVoJ&7XOd-UtP|U<*5WA4O^w2)$Rzm zH@Xbp>K#2{+-xDtS>p$CDyZ8&SuZh@%l;7l~f_i-hj}(wK(}5 z%lr!hIrpRR<~OCiIEHFg=?>b|bPn~S3C9d?A2vFrK)Ebe)qLKC*|4EhM5eDjP6}*) zfoc9(9zJ?%1s+}E9sQNV4dJwW0VQ0AZe^JVHpA`1$0&fu z?uif0fzI#stBW07uGi*zv*|I(MBHRFjLk1IzqX~g7LG4xcpv|NrR1vE6lQNRLXt9W zAHeO~^d0#djoM|*p~!|(dy1hAt~W<|H`-b-@E|VBan~)IV8x9559a!%NZMtDI?Z*t zb#(KZFP{o-dD0#=*_|=LQaNy$Z~&y3P6UKrwfPqxb+Mj@90|Fh=SZMle{hp`+M){i5{ z&ok=2HBVbo!72>F$yc=@!}QiCDNVMqc$PY+46v4W!&!MDHlwb#G_{={A2*-SQ7IQ{ zq>u^f?o+f>#pY3W;t(C%Fhat~!ea$DTv#6!>it)H_xIiV4mb;n9iEvGw6AlM%K?U@nJKz% z2cn3G_avOOXjNJc$G;8#lgepWURZlCFxd9FN3s`;4^>;=ggmd^MsA`_MgeZC(h0+5 zjjQNw9nGk2b3JuyY>8V1`_9^KH&s&XgdQJrCn-zkcG_`R62>=*TdEl7Ws7#F;RsWIOxzAFo$j? 
zOI;1-|FT`)Uip#@w2gyGjLsKq{OUOGtr^Ewu5*s{J%Ft^y%_)+{uJxn)?1%l^SSZn z-_!a=ukgv|k<%Y&-Op8p^#S)&Q=SlKk9B)4TD=#EzrgJ{yjuN7vBvjgT-7>Iy!u55 z|H$si(xfs3VRQ-aKUV_^o#yGch{%*u| zsn*A@EWDo>!SwVa<7=0-%}f-Re4lVVG`qk^4V@t@lHE3%Us=;b7;30=RfF7#^_91w z5*xF5Bb%!tY0>N)MDzrPF~s1VY&jJN^9xiA5iIkdYPu@D{`qUyL4qoSb!u!TGb$g) z_p`$yR()ExODv*GJBj~k22at>k3`JIF&Ssig(aevMSCnjb}9*QpZkrNrKxXr1YiwmF$XMc6|E(pb@hr51V0zN=?2l8C1l!NPNp^GH21!RHWWmzRz*m zG$Xn`7A_XTEI>05-&RnD*8>mCA#i*qfL?`gEr#m!gwVzvJq_yct+jP+MIdbUOUTJM zZM=*cp61TVF>Wiz&`@2ovw99YHa;5kdGp!16R{r5&FemCa9qxaT7iUzZYNb+!-GW$ zR0f@Ck2%DTM)aTsG(7QBQzRT%Jg@ES^+Mg>F<*jvVVu33yK=ZPOfVCFTZ+I{NI5>T3+(ZDv z6JV+J5dg2hUTpMsHXAt|9D>;P&d-9(l34uW?XqGdU6#_d!grTku8p+>r9gfB2BeL4 z-z6ixUR;a04<)Xqv!4y2o2!tmmUygfNYA}dlgxD*rM5$#g;snRR^P*Zd}LfTCKFzK zxL0v*6Hj>l`4r>9lb^{5CE)4D9%GBO{aaVvw&q)t4Hu_{^_mXk?ShRItEvymmBcy2 z)xH`b6O9RVIebdO>4>qUCvn!NZ|*U5O%o@Xj}MBZ`zbj})gDuwIJ%fs^k4NJTsZ0_Mx@dufUYOse*<=Px zR=<6hR7ue|DfjKHtSPKK?|re2lE)b=@gd#5XACCqtjQYxa1mxaKGJy~^nI%Pe!6uQ ziT4@4x^{Wuf3rMC!lyK`lABX4U>;r&8~gHU-;viZy`NdbE<87*ax?91pM1Q_oZZ6I03eJttO{*s*B zUqhwpQ{uMWlikl(&Ks@Eg4Mgw|upqSe6-wdURHm7_faKPP2nsDnt2qiW~N z=c&NEpOzy_k(1A-9XNG!<#a8VtNTI$G@r!Vg@^bxnp34b%RPsK+Xs0%qT#gP%d2aTfx~3vx$E@YurrYq+AZc)OB+SEIYBc)Xg~;wR4B-O_eGq@wqDgrfl~pb@ z7q}zYuE96QGBA7SYCOEK`ErfQg&6Ijg9#7?4gXyBz|3vPuLu4Zq8H5VfAgU#sC$0~ z;`k*cFH*8L@9%r1xi2QO94C{{hG~At0#sO8X=%7L19yU@B_C_g<~$zzl|N&w-Qxt7 z*}Z`fP&;?erh%}t)@7gEO~tW#3${mi0t>XOK>&MWp&zFX^xEo|c{@d)m z)VrjCJbpI({2Q%3Cc4y=>E~O*yxlQ90``}ksbL52NF4-V71%ZsgsIe+Sl@2bTtSD5 zeUg5E*8J0uEtK8y&!i?+I7Ehyipq{c^rxz?RUL-;`_Wg#-(Ut+=1K3|01>m1wY+ts zaD=6$gda$h@+L=ko6pu!g66?I9wEllidQfdUgS&x~0V5Yjxn zP16|4s%)Iu`8X8Hud@A;;ee%C;)ZObQHX6U`PGsc)8g#MXk_Ql1)`?PB zU8J2!{XL!MA#ZH8wfn5xZ0^Bn+y7V_|YMj;vm!j5UEfBV|RM(&t2lFC{_h z%Z1#SMQxN(kl|W4CfYiQjm79<-?7x~tG9;qu<;B_y>8GZb+K0Gi*<3wJrO$mC+^RK zM}HO3qJuR!P_iLF#(GYaczismIWqVF=qL?S5BB5+yv4hrKs3!eA z0O<-t?KeV3-Gx)PY$;Rg=+51NT9;nEDsK5K&4l_S1%?cf>G_bhz_H!&G;i-KX&Ifi z!pi3h()(XFag*&$agz;z^*VXzLC{oy?3ZjfMy(z_2dl!woCX7Mvrd~cHNZ#tI+{!m zNL)D%yCCh`biLV4IQS!=t=5#+94|!;f^|{{JG`!c0t?TD(JU0aye$jWvNtSM+=-q^ zvA2p$>#EXzdZCqpc@Tq-$vbA47v4S1?7R*G4OT2tOu;!?BEP?(16EBo4(lz#B3m&o zk;RvWw*rTA+hv#T0vY7 z&OR;r>e=>+Q|#e$b&f-Na^K`=F9SUsN@PQ6LiI_!&8;&etKe=7yJ92jMkQkI_Yk`u zgo43~Wh}7f#(3DKp;&%%r|oe*b80>vV+e-dlS*z{?6A2UngdYB z{+<8k{{}v*h!H@F*o++R-CtOFZh~qbJ65rd?j;&C?)?;EMrgqr5hR&F9D0Xu`%PWo zSF{>#N7KilTbEx6!oR|qnI~OF8pTd4^zYdSfAGdoD>7=YH%ESSz{S&4u<$%11GQ*e z_k}MkVwN9s`1|-oO^Th7;Uz17m2ku!1MwE35Vde1cyjVLSuwSB8J^9N#W7$KA_4!} zRyxjTAoyHe0!-cw-?5hCAOc8*4FC&Yqf-H|E&y`$+g58$>vCAQccEs1=@p&?*eK9f za?H+YIOk=5pY8z(cHd|HiKKU9t>VkzW?2(Wu0lgwc^C=`Gm!TX>pAEqC&}i{`?&y8 zb9^IWbfjV%b`i6}??%>7=&riTAC}o<(D3D+PmtIMk1wYDQHttNF`dc+*c0$wI7aKI zh2Ai9o~v7QnoB83%Bo9pJQ*ar`vj%5QdJUgl3>n(9^vEy?8BYKdv9xN3MF^}_w(np z@7cGD$GEr!`MJ82L9B#rIy%|ghAH7N4#0r8h zrI#P6W%asaIE2>UC@kzU#Mfg~l~_rX{}AUhX)D!~V9=8$QrA+!DU2w;)cJ-*(g8v-|0)hKWE3e_gquT(-`+V$S`m zUFTQ7Z5Ep~ds7>$xD)@iFdg(O`et6ZxU&4}i5`&j9wCZ38(ll;W0$^uxa^xNuy(eZRr`3^2tDXB6Bp_$$Wy&@|Hoc_N3NMXYGAlhv3IZvE z;EHG@4l>&O!xiJg_8w0M9&QF6ZfjT^aV`iKO(Ng=CF#6zO1#?4(c+$Uw^I~7uSRZY?F3y|SeC-0wsoejI_Jb`;*N0kZ$rMT`K~{dteKd$Ypm zg#nPdTRqk;+1QDU^1ooS`X3j7bQMbYv0$(B(Q)BENAKDp9VkGe0A__FU{}rQ_s{j$ zuV1LU=HFk8mw2iY@v{hZ0g(@Jf~a3xg8$6SH?BWE5M{Y~Q6JWKCCC*v^Nwpq?F=-y zt5cG)+Uos~DYLSTo?aoL7N-2O__}+;Iy#u(jtxAA-emZy$fOAU(va;dS(vCcnpJ_! 
z^O?(pi>-)zohSfC+B>}|ys*TWEi_nm6QR(pX5PWw(J(&{v`83EK-v>X5fbh`@efv# z;uOwVR&ljP6%xEZh#^m$o|o^RWB5bZ43$GynaxS+`cOLHMY20W)vO_KDhot*-{MbntFP~h{Ft`L#hg8~x zJ> zxgY~^z|-OyV9zp#kjm+6On2~|vG0rJmyStW6mAnqKO${iy9#wpE{{;Fs1wVjEAnEy z9|dbjKgZ)$62ReLw(_;3{neCA8Ayh!&nrAfpEy2e6l01~IPs=3Iu|SJeT4Ym2YBBs zv*`MVyDb$EJwDz+;!uXI@JY)g{i2A}YHu>+$sa{i*JX7=MZ5T6=ZA9o<*fXQV?PQu zw(FdP&PPr9u|j6Tvfu0q2Cc;XD#^R%YUuj?>&>e18c;e=GQTsBW#-^#Wyj<>Em&NZTg8+i$O4>@u22B zwIImbGQASJ6(zdEn25lTcSGQBTC=hpyXPg8r$cPSYnY*ZMpA^eh#LwOvk$n95!O-* zL%FwG2sCblWXjM(ITwr-_un#>g^QQ>dcbJiWTKuCVL@^C^ldTQ$A5}Zu|@0S`jgGW z9dGvFCCTq~YmLj@W3}g(^8i`_#%Q2xdqy`5Tg0m(&y-Qcvo}>UO}FBZEM1q+%T}m| z^j~37!?igskYG@3rIa)E2@PDBNSLL7O~p>wJQhW>dVgi{&A`*Sfymu+9*fK35VgZF zKVbX$`|UW~N4!^Qkm+aCaeJPdE`~U)4lPVN0;OH{@_A~H;XRSiwko;vzkEVi7Go5) zb-*iNP9_#7vRBEq+rED$w72#uN$` zoK>=F#~pj4RJ%~#;T$q^osNbJdTbIzV;F`}^wJ%wHpXn2kyv{@L~P<>V8x|%V+*V1 zUdG~@syj+-lY8|HS9ah-C3$TZ#1_5hE#>~or-*% zymda=~0g7;pA)Alt>a#avjZd7&{-&$)021O ziTfwK|9Un<-uBK{0$2q?NPp$2ijW2L=W66x&&X0^ZTir= zhNe1A|Hk$Y$#CXP8_WOT8Rp^Lp|!TJjcI#-MDw`xIC?DcBnG4L_MYHm{t~166DWDy zZkVO2#CGCD-w2|b1>n;=zQ3W|j}T><-jA9St^S#>k{ljX&A;=Y#+Rd&VPxU|BY z5<2#{{G#2Bg^S8S`fK|v2IRgP@SyC=6Gum(_OEDQK9J33i*j0b8lcGpK7nq}G5=R^ z^#LTnGN!Q;c7RE!*Zft{0KMhV+^sYEHa4n$B{zI%86gA(jSdQ&6VJn`a)>*AFpSTg zmF+*I+0MBPUoc@@X18tpa3Djc3Ho-VTTq~AN|CNG_wtrGa-Uy7N1tZ@p;C1-^~TUAxWB`*$1A$2ZvMJg++V`DXO&Z?P*xV1qb212u+WNpdQ0;N#<`S%uDHGnN6K3WiHopa zATaZ0TJ9rmg(T+)wqMo*e=k$Kau&lMnz{Ux_&sz&3)XDv}gW-t2(?ae|Oz#w^xQ2N_IBG z(qz{t?z9TJiXD@PJOqX%7xCHZv*YT)13_*nlK zS@2J9_|JTmSkTmWM6Hr3GC<=xoi15{ctgny9DayxhsO`JH3Yx~59PCX0i;0&k8hoq z%+ofVZ_gSn=6`9t)1m?NCpRhzk%2Kj4t>KjRiUih9vP^h$?a_9oEr{)QIIxWSCvjK;*+HV@g%b9a^Lx+u-E_84GTQU3ZU13J*Ub%&c&3kH-}WX+ z{e1gZEjc;a5qQny-&VwJecB_fD8kI8fld)Q53OcQQcqAxkI6rD>S7TylD4i9XwD0o zEL_d$L*x+uAU+f~MesR!ao~F$5iI@=IzIOe)E1QWmo@5gx(~_bszozc;N?CL_{Swx z>6g2{b`8&OY0&3}hhpIi}E%_4)l`BYfoiFatLm%j{zlqp~-;_MsmMq9DRDQ@>yPaY2yU*3$ps zQlXP`=JJmpXO7eQA9P}>gDd>di-yndOoeCQIztjtt60I!>#y7Jo4~%!fYTHI;pHiJ zu^JFg(lYW(`R`8lnalGKO6H2-pFat**FjfaD-xKps@6!@jl)>3RG3d|DkoAl7rzb6 zV0@7o=!@&g;JOzY&QZena@)jX`0OynhmcFpPRm;Tq&}Al4+N?ynV5e3MZNAvzrVS5ceh7D< zdajuV45OzbVs&Jai4v`V6jpx=3hK6kPTRVG6Hi7Lqku#s!CRsIb$8MQ0~dOn4Ev#Y zgsZENysf2xm+Z?lwA~}8Cx1JvEzfPIUL33*E-n(Rqveg7bB3c&O|~+nCNtg#oZ%!b zAb^K5!LAROUdU4#ay=#v9H1e zx`$6WyJW=`8K*vX1m4>6cjN97D9R@z}s%42&{9pV7yReTy z;-3I^q}rzj&k+%EwDeI~g)1_I@lu(9Vw%hTKG(9`oHze$YAOhLs z`s-eg$UW@lu(=T;6S|9}x%FMkxCoLDm=w_QJu;k4KB5ZSE{z|B-Pc)NQ%%xcyAjOm zwj}S9R9Z~)o*8S4mIdUwdT{&X0m(Q!OgLG*XnDQ6@hVMvxN3;`A@NPHO1DQ!cIDDR*HQ$_!>V)yK%GD zI*$!zs}e4P&qDdmN@m;P3O8=JbL+I74*BS&=6!7GO~5MPz0h$YLXS5)MR3%-8$@3L zq%)`B%|t|ufpEbx6lE7g(e`J3MOIsZCGVJrPp$`4jmzf#zwBZLXw-j^d}wgvu~$aW z>5A0n`UV|-Xu4!Ek@T%BiO|@zNXn*ZYMM9K6?Ymu$%Xxbgd<`2+-F%ksfy{k~wvgvutxiRmUR#?4WR%CsgkP)y1o8LO zi`Jdmieu4MpY=sI8sLz9cO4A8F&`nqeD6li6RV65>tmn0@zyBq-Tc=qmMQ{Ft-D2^ z7t)3rJ}cQfEdGgd`vR)b9ohR$(+uKO1^<P z%GvQRu@~;RU8p+;lpxc-h&gFts+OSrOHIrMZ19#X=a!M5Il+1HcO?SD0FiIL|LlSh zpO};n=g%aDh#vk(Y&tXN7li$<>c0s*R`dQ?b>wlvN$~k4e6=m1Xizim`~@mcSR>LC zSJZCka@seAV2wFLDq_GcVl3t!yi{WN4+qF;{JmH5dOyXHyB&(HsPxwKO3vYh?` zM3nh>-q-i2Vpza#(4M9VjLCigjt!SLbISKtmjWdlmDkuI(jpc#55CUvxxhIfjVgC0 zF-i&j$rIKcn{BbI->O%`SE~m$RP45<0=y!FwgdV@M{{1nvv_^SQ=PQfs{QHnDjAE4 zxaDRxf*x))dF216vKgyuEY`rRmlvi}j#C<1 ziJa~nEvrukECNqv9QV!<WnkElxf?omM~2c@muq!y(*V`p!xal4{GE`KY;~RDy-L7i9_$=KK#>$!`dv+@(|`-1hN-UY*(~Ha z<_s_ZP~-%ZlqK?w!NOsPv}9@Ah04hP%Tnlove90K<)4v}%|0a7lS_9NyliipY(We| zKOB;FWzJnONhym3CpR~@cM|!#NH!p|aJtZjgD0;I00zUD4nKvp6uM)C*Kg2CC-cU# z+r|jIm$W-zZ4EB8UYnLCrbx5J_Ty=va(xNVa(tI2;gpDn&9%;-?{*M=pdDpH>hP=I 
z5Dy)egfVFX+UqsOes)B%^5SS>rpbL-UpAg+pv}+fW8LM*^@xi8E;O2`QM8amW2yE) zg)c8{FKk1049Yy7P5N(YN?FEbzT8j6B>EHrtraZD|hX)=D}fY(V+M~6?ezqfDJ zq$i@WZy&Q6EamI}bR}X~h14!~78d{XhS<~dy{hjndHUn+)u=R&Jg)$`yCtcXKs017 zxB8dFeI{?X1eyLJDbG{N30l4OwE{fZo{o8y+%keM&F6QxTZ{keIlXml_NMZt0FTPH zv){*mCMH&EPM>bIvbl9Q-#({5k&OGnm7V}1fPjo*X>)U`z#Y2bA}wZIfN3AoVQnPuPmYN=9GP5NHHKE+q(Y?WQXorqy@EOj7$mlYN0igxq#@t07Kf zpubJuY^(5f{HNa-2fir%7s$E&W@D29cp(AE{_aV`)X=H>`NEw&uR^Ex1D1=-aL-<{Q$WtNAXF+b;&X=5(Utb`YV8OZs{#QYlcdQlbbu*-YG2 zz_(V^O;2(K3nE^v4S8eM`k#7$g0DQY)clfF4O+?%v{HmfXpqA>L_FN5F>vG3Gf4Odv-b_93OLGV`R-su0hD5F^tsICMeAa|{-X~s$>;G8Sblgcm zMDf%4-Q#Jc$)xol6cn4a{Y`0-t;90Jf+FVe5w)7FVYJoZ0Su^-sj;4O{@Rwq@I#93 zZ&Vk^+={1vBPegC)|T}66I~(8#`7aO6dWA%qtJ&N+5HM!M846LNs8}_`OPh+?z=Zo zlz?5kn84-BFxt&9pufF8o!Qb@$lsQRFTGy0GlXtvVVU0WXTh+;g_8^JGj#vTrR#7f zC2RDMCKM0M<)@?9`uSu-=28C-O;;IK)wV@N8tHDNQ@W(PyG6RYJEf79?l`1$cc-*; zgMf5*!`tWH`+n#zzvJ2atToqIV~#n-h&<07x{CP1yWBNj=6H@0F%G&$7}Jm`GlL;U z-6A6!!GUwuUkKblFIt`NbMf9!4?`-%Q%EA^Ur4m25tsu@{OdY|eg7eKWn1s6%^Kq& zR?o}7#GjNFh-TED?_AGU1v#uW`bf^EG9PP=#?v|40YAq~z;F;8bN($wWmWEI#aU(=`xgCv=kphvBEEo{O?b|!u|`m@fGlQQC&c|(xBG{#mUj@ zJ!!F2|DvilMAE{}E8ZlTqv@Qnp1FL0(V4d!7XTUbvYru)VRcOS{RYm8=F9$d!py{>lxIZ#8u>SaI2 zQfIYXO^ZSvGm~bIM&nMh*Jru?O~h0c-`4d;q3L%c5}J7J_A^^r?*et=O^4Q=za;>R zM8kQ#+2}d+eOc07w(keU9_CPm)-*r74j&8B7{^8=`X&+XR_7SE!*}DJeH_Ur4ET4; zLlJ|>do2Ot>58#)&GOV*(G;5<9B~J#h;;L~FpCMr2CgoDJk7D`(iT0L3qRtD?CbGLHs6J!e;@+}SAmH%3fw44bc>uNBKSpIzsAiu3!5WE=3jA zKWw4Z%f{Oa$$?8cVElqGa~$IB9+20Eg_BeDFmrW2!Anb}Kxce7`_sha+HWaIh511h zVJ0|dy)caz<+SC)mgBhVOkTB8AiyzmPuJ2(IGqM3q(0=h*}Wa#ll$_(4(;syWn=nl zzOm8oewsZz`LlhkvB@)`k=vfh2O@k-o>Kna6tq!|hU8S#;+08gm^ehAYu%T94Y=E> z*YG2vc>G!2j4;7R#3U)_{nHm=oY<}nK6187>VZI9V(()X04Om&-koifJ-sgTNBHyE z&)6lYB>M9Q!KU0DzVfOfP-W#euRscRs37f=b(Wl}(&u2qpht9vyY``4>Ui$g4A}6^63r zVq=r-Kc=baIIL80zipM_tqHz$E9!-BXR5>zI0eqlGSY86bTF5SsHPvO;odD3wnZ)M z_@JG1dmOojH|DSDsmabv2veGtTrr9v;T{*vX2qU334ehG!lJEaysjS`CNL@tkLRl~ zxFLC_PwN}F;RKrMs@GzSA<0LvT1fZW>Jv zlM#veQYU?O**ZGk0)nt0L;d@=O!F62bO+MaVT@4>9n(}C4$~s^*I+j~#jobSp6`Xd zW7lsZy1b;|#uf=Dr?uaw=wwE1&lY{cZZUf$U%jmGzuV7hZj89^ZSt3TgGf5Im-N?n zL&MQ`&n*n6EaE{Mxh>NbD#u##TXCVx%_dK0ZRJrCBzs0!are`R(6_gK_ zIO4uMo?+v6ODPo(6kK#x8yiLwshj=7EGxL*6>O0lz8vWl}`&xm7gm9(@KLZ9C0lOW;{ceU@} zfVgmBcpnMQ)cr_raqNX`mx9wjL(a_~@||9Q=;%mMSN9latCe-T&K5{qFR)4iK^-s? zuzgAfzo=!6a4r$Z!2EQ+jHoE2|MNCU%inleT*24hiEjeW=Y5M-M1c76VehK?52U;+ z$8{Ocb&u^^zryUhi0tdkUJ1ib7#YeB!>^WlcQn!KAt*@3OrWB7e0vEX0I_>1Fc!u+cdO zH2c?9y0_cwx8qH6{oSyW?BGe*6a~qp|2U z&Vg8yt3oEOpxi3z@vOU?3XWm5bU<;z{-Xd8SGND& z?9!-7_;1f_)z35NwWorE0Ie%qLIr#uuOCk=fB>c;V(+NhTGq<;%Mm>w=q>e`He?7= zuvPTxwC%i(M3l9%<)$q{G%n*GfnK=i$D~FJg(chv%HVj~c;6AYM$YXrJ(NcIl8%s9 zl5z6%1Zl5xxcVVTn)>+?F~Yl0j(b5L9RH!lfvZF^T@_)Q=ubUcf**jK{W}Fx z#Yutfc((v_RLQ=mpBbtsRlj87e?J9?VblolJqCL%XV|rV2Z2C(f)d<>WDykd^Q|52RWr%-{6>qI^cVh- z_37UQPx!Vw7D}=Xk}i(DPCM=rV;vtH#1>d@PWmKoSo>Z8kpC;kr$ldT_e8n=`ErhI zrfa)^)YOpAtewt`e+c}obb-6bX%X$_{d=i}2CUR{zmiZ9^DAL>fco7HjuX?9Mlq~uXBS+rL_Uq6Ii zzO*dl>P*krms`sm3pz&hInBBPguQ5!AWs_lXmuT#){T#2(bkT0c5d&t>=yCT#F#oF zTz1~QXL=^O3!N)~K^_4i1zpLS0TJ@(A=wsi9eK{_|;HDJ*@CD=8kogh{C6Un{0voc^_D{xim+drYC(&~CB zgm3Eyrsm#Zy35lUE|{28;c%RY_})y)?8?SQv-^`JQ#4e`i*4VLFvY5KzqyD-d<0!L zCZ70uwVa*B3R|{GEiHKoOx8`FPINeLu+^Ead+)AZHOeb|96so_;5#>Zbuc6F+0%0R-L6_1nUR}(@s%2U@|sC5xUTzL$wx#+wj2~? 
z7)_fugFt~`->cPColq2#*Rq7EqqBwPTtvVUyPm4Tu4yZa3zTA~=S$u{z)^7zd6ntnPzheYTik;6AxCg3gYQ$^_g0XL^7$U90$TT(7TD<0M{H{ceTrfqIU#n$5& z1n+tp>eSk8A%+*-H6|%aba}4h!=FAG^fGFx<#&&4tPBD5{Ko+_M2&hZST9~4Q=UkM z8KZP?qpEQtOe?DXFfJE3&Eyi`Wn~72duU`;j2z12o8Ev*Vw#;qg zK1Vufg>QbStCkFHd%(v5bvw6YjV)xTaQDSu3Njyi?D}kY#5;>#G`q^IFmQ zKSKwp1Yfr1Qy1qRe4m=fGNlw#nZ9eNw15~k3Ks}_H3(;IN|A|0Nm-LQTV=Scokq6U zBTXlMH~{pU{93>O2bcm8k_dGQ2h)nq3dRQS#7!m$Q zbjyUu?D$X_naVxMX%Cme^LxldjkXII5fjRR8G5dS)me%tmgW(qSte;<3;=ivfZ@{j zy}=@vi0a;SV0YsN!9QLMkpO zn4R72@c7kDijR>4kq^;&lUVT2U|f1^ywN~HES-P{Nogs{su`=Wgxhw%dPH+h!WP*Y@z|4RVXNEEBAa#yU zFQDJ*1E`&vm5lpCI*>9=AzX0ZF=Lt0S|yoVqS_yn5C`o~{k%3e&MJ$62M8{Os@os7 zE})rV5uy4v2a#ogb&H{*4xJLC!Jq}<+98G;9k(iGW%oXh85&F7hN&`ldYhbbUD{2B z@4EsoRbP!JP7`O3QtC85%78it1^4l_U5dD#nUzbAkS9V07I*~#+fbOMa1k(q$nm*^ zPtkE2M*Ae^&HyZUe}L4-6ZGSw&i2@1AsO0J9eWhHIJIUz6^V|ld>@o|Fz%A{zDm-t zAz*m+ddpfSQANxpgb5c^U7H^5EwlJVTlypB;7;`q!aX4fc!ia93<6JqyW&I10RdUg!_v~-CVXRo0ia>P{2E*= z@x_>N{{lK-z($wNZnZh}`b|RyNQ%dFJ6j(IGG}6sw6ILiZ;#F&a16eyW_pxLv?1SM zTJTS~YP|(*O%PydDt>bw3hE;*=4^nMbXk3UC$7$6L(5>Q=*iJ(09QONW2VCA7Kvy7 zF`Nmvw*-$_x~c8F9> z7pwpWsIWPa6-;V{4CsWOa1q1b>_4eW#hD3vPEeFiZZP0NzSn(`j*KN|nTVOQ=_Ntr za0>_Cd3vg#K;@4)RTf*Ga&4Ho?B%KfnRdjPHEV+1C($yGf+;T#ow}ulZ1Gc@2j^kp5+1OKsU!@v3>?)MF3z0sv zT+2!j6Y8K5YkXFwP{e3JpVffHZ>7tAc+%h!ai`kBGy4VZGqQoJSo+s11}N(#rhMDJ z!NSB|5(HK46GKVq&*}n#jv619hT$nb6Tyxl^;ccNimEktsCl$=muq;{kLOxvxEv=h zmVYI~PX<>M5jZr}*2+4&HJ;ALL;A|*ErjJJdy)i4@GP=p8#$vys%JMj|nM+ z>=Tv>2B|-e$Me)l%aFU_qB6WBnVRRzF0^=sWMfBKA0q*N+h=d@+riIbXBl(S2{K-xzcR8i8Y@|s+NSL6l)~}>B zNos`jh38O##|#xFs>1MXqor_1Jn#y<@NNXiWzhM;2_}1nG`9^`G!>Vqh+)zWi@&6t ze(XCKa1iV~02dlZwRU2U3z7&pW+#eOx8lK2q=+WHZ_&bT1tgJH7qX+GFPC&`+kucRZAr%OXmNN)H3|1>1Z+w zV0oJDxr?WwqGH4fd^SNUzc(j|b2jlNV1d6e>jR`#8|3oe?o{WzH-l(_7sFYL zR6)h5!ml>^`oP|}zAw2|S5i_A5)J~Mw1B$bkSil&cFuTQm;ybo_av`*Bzo8&_v;EG zuzo-GSff$qwQT zn<-ph_%WS=dqX6?2b6(Pd#-V5N4*q_fs0@Q%t-7WNPmbhRmZt#_Mgzthq5&2dAXqU zh6iYyPju9%2nqZ33HT&oGe(0nbBvM{xG_LIQq|rjC5(w9CXob+GQ0wTL=ayX7%$5+ z!@V_s$@_-k)w6hogo`-kNV|>^##STA~AMeDm-Gz3Q17d3n zYcCeItip28haJFtu%ejibJdQD@GB7`7cfDsf$WC zv~9k(Bn7s+7@3w!R#d+SjQdEQybMUqs>vb#9~qvQoXt`#Fd6O<0D$=4fGc8Z6dEE7 zIG$LqDn&V8!mw`1h&5&kuB!vvj46Jf5t zdT$Bu*leTm83LF$b6kx;z+w`Bfw9$3aul8Z2EfkE@EDOiG~FmB%eT^+j)0tt%b&D9}zv~lxt<I+`NUt&l_glS>8grk8Da0tc+9!39~61NCwMpy#$A!RLMz$Siu61)~O zhy&d9lrbNA41z=2BKm~F#~^4_t{HuO%2DRZdLzf(gcH$o6>@U;HC|~ zj85MtCqS+l|;njxnS2L`exlKRX+2d(C~^CZ?AUQdO8q zy|0%zBUVyFetY0*e~AE?C(FUgM9|=#IZ(SJ=k1MOW4|t{XoZ?F$EEF)@qP{vD<&#q zXmt_;3hu=}*hoz|sFI!)UB_iiij;JI>TxNMCu z$L?I!^_!jh027F_oZ0t(f6N%_|AL|CucxfOL?Y&_u5HeORiLh%Sy{;q zmW;+>a43A1X7Ps~(?{B#r*Cz$U#1pIhv9$fE_<76enl84Ju^I%bXca@7i_M{kGx%8 z)6?AX-3tULXf<>9V!9WlDPj;7eD#a0nSRJK*C-{q(;A}di0->(kcTa#F zvD{c`6!{OhSil#5t<8N8@L07w$A*^3AMA*w!G49&U*@~N5-~)@acM}fF4!AnZ+oVO|4>MAqs3b@!0)%)I z=N=)zt%g%6&}v81wEwUJ8y>rlm2i21eA#t=yAiz4DpK3!`Y zvMIOkdT)f3S7SQ{a-jF@_zsX3z2t}2A^Zw8o0Xz?wkQ) z3Ti52LijQyyLQ?9OJ(sVWT6>LBp)%k-NBeYO6f(LwJZCXNuhmKmgg6uo6Nw)0u1=< ztkAy<1BT$cX*O#bYJ(Hg2y!sSb&9x>WDz79LB)>?cjJ~QEkqQ;Xx4Lc5mHd-==D@r zg9)f0M_OpWnnjX?4)9p~0HC0vtKE9kt;PRtAVan*jke&53ihD3TWbY&65u9!`Es{$ z1;}qWwUb!B4Hq~LrbTetvOiu!7OSGG56NtFDb}dqBH`~!NV(9NcIO6&7&2R=Mvph$ z*&C}_4Ml9gqP(crycX+kxc6q^W4vNMrE0fPMjD*j40H?suH$k!A+}v154Pxd_8VCJ z5}{6?D^Hc_8zbQr`mB!%YDxIFj>fD2H(3304SK*qVgNHAOHoEvHjUF3Ka1Dh?02p( z3*NQ`1%*{6cY95KiJ(kRB~b)NnZf<9{cUTwJS8~9=sTEi7R!mq-hh)Ye^;lyxcu=p zTD2uP-xH>ZIc@7rFzK4%Z@dV@VvBxisd%aIc^cf{ae{pKB2~pO2+>@$jTIb`S&VKv1SJc#86Yq0I7kdKeg*q~*_hzusmMw!;_r{`CV=3_61 z7NeYZ)2>886w&_bV&Y0NhOC6#zE$w8HTa*jqL4PGMe7Cpx_WgnPKN4=KR(u&aMowR 
z^Lx`M;8k+_mP-urAvt5%1d#`aBNap1rRK62ysIA#iem7Dlnzf8`WL5hn*nKnf-eV( z*GCJnKs6CG`w)hOg>`XIoHO0xY#xeCv|Lg_t6ge<%4zqF=&RwLS$Hr3FByc=A-#E- zH_nQ++Yd|Go&GVuRHUa`M|;~$8PMk#LQT#r4Bj|XMeBYG!?oCm(vkfIWb=AoVcm)k z8Wl=V@8dGx>$c8yD!O8MM8?`2f7Ykl^(kDegFXHR!F>8+!@d9XB1ixcL=@#{P#-vj zSV`gGYqe&pvhE{3bP|g3*f(4}-?@=2A2Df8!+_2Fwc7|fQoPTI^wH3CV*#5v1EqBw ze{JC)7+1I3ady>?Mqqz$6BdxneabMrW$@V5kLU@)!qG#@pO~&*qVS{;lQn0xzCMOr z^P<62F=Y^wof0b|0T(Ze7(MS34nQ_C)OOYxIcF6;j4MM6HtGVxZ-#OJ6N6L@wkESB zTaNWdRBR@-OyBHzgA-b@H8Q&1vLnHd8@k^FkBjxS6}_n3sRojvgyetqH4T)pSi#sx z(37tqn(BnH5pe9bohte8(?7ayOfY+gNazsZ!07Pl{pf-mj1Il)EP1)J-EohQSC`~$ z974jvs0@>B^n6eLEU$2|_J1wFP8{*5WS!M+r|%>_ z(wR@eW=Wy{DcnbA4s*4|6Ap$ z$Ez&~Df)hCK(cERqi$=z31T58B_*X?I{WvDEM7Jrj~N@Xa?Jboh!NIsnqQU+e2VGI z?z0vfHJ*d(=y)9Dj%L(Y0T%Q1zrV_S@7hW!xY#W{3BF-A>^5QEWF0c(V7JRaD2Cj+ zqte;l3g~<0Y1ztu;>mP9GiDX154>r0e-PIoz+Hrma1SC5mU`#ne(h{>#^Q48>w9jV zWie+1b>D!@EuEH28?IRH+g2$_;^p<|_fx;&?kGrbp#}-`_Vpb)dOmzw>tJnacWaYM z3nQ<4Z!HPP7nJlmTHV!O{kCvpigJE$eBSGidBQ7{p>V_Z4NNi_N&L5;tbhwGj=m3C z+1$vQ`v$Ph{VG}Y09!*~$#A8mrLhnkc?64`&s(1#zeH>rZzj33e_4?y;GxccY0}gw-c!jsLd72%xr=Nd9kXtp1-9)J zhTB^1`Y4edsjTAE_pB3kmE(7w>RQc?>$HWj=JEB*<6=;#)$>VDsAp!CZz0-uv4q|R z7TiC7(U?s`Lx}^zCjxTr{PrJTbLKjIZ&ws!g%rGzoQck03yOLuaRS@$TW?C9F7A%1 z9Ctz@l#qh0KFVE_*Kh1~ulpt~N%!Ku_r%p~Jp1;B=dd$^=hJ(fR_XNUK;Vt1Hx*s?WLl?gNa&Psuz%56l^N{y`QS8*Sr7Rws8h@0?Wg-hTcr zV?!h?o$|+}ptiJtfg9~j7pbQa$k{qsz*=X;^+vlU_;~f6R2!rb&yIY)R*u(xw_coh z>Mor)Wwgz;z~4cqOVZ=rqUO z94%a>bUjlReb>pE98y&j90x4*(ruWwUyw-hSRCLeD4W_biq$u0Pah(C^`Y&!blix5 z9K`i)8$=megbvDb23=Vt6g0%pdj@|-dRf0*RxSCouFGFBK}M3}b_g82=qlVP;zK03`WL16n`Fiy(9rq`ywOYiwLV!KoCdElyW@R#S>4hAEQ z=+SA1^87{9{$ea6r#fF`ugTvlonZ1n(n>+2PY@G8?;x}jpe_O(Q8tfr0~jRy;9<md*Bfu6)r>(?syh&cCpJHT96}DSr$X~J*Z?$WcIP) z&y^7_Hzw-}p(^3c!09)f*Wyiq!0f@3ae#0?whiTO4Oog~LY#XQVt*8|e zdN|En`1fc22CE(qD>$G+@Vg-Y+2Wa*Jo6)AC}w*$&)wnH*8a~$*q5st%vZOt!AScK z(aFC-bhPZ`-w#f3KhuRJcsCiIy06fd;Ei^jb_A@N46(Qof%O9E#SYwKy&)f#YK&nQ zbexfZt2qn7=!qLR zyLsO0n`!2i6n$oV6nN#9Fzk_7Q4(_Sj*I0~(zrmk#X>z%#b&KF35XmG|3NOvYVkWa zl@h^4cW(ekzo4}>9e^4HL7TsD=Fw|rTFM6d&4D03HLyL>+I!Bntw7noD%X#w_TS0Z zvLiG`8D?_vq?#z%K6Y^FqVvRAL=Sxn5TfX|Q)JmldhKqF_|!lZyJmO>u@QE|3^R@@ zPv*_HFS}muE z00PEk%uJpK7k;X@A-muLf}z=o;mNlvBetkZXi~M+Y3RPEx!hIolc>UxTxJV&r5!o2 z;?=LbI`&)j_S`bY2Z2=g3$G3!vKJ;}9J@~I^6KgVK*245?p|6^F?NX?X>+f*zVL5{ z_sx{YKHwE>9})85Powoa%x%Wxh;8GVN+UMg?OKPltVGz-MV*GMIG*YeEzx{$BVrsk zo=CS!PAx<;MBLk3It>^3%bL!OBLABr>Zuj|xYtWSBTC z;p0Q7p-S}Kj6ZNvHJaCif`t^hZt5HHrOKC0(sK4=MjVB(By6WjTHuX+e*`>=eoa=j zg&h{A=#2n_G=0sE+xsg`_G4RcVSPZs_13D;0LBJeQ=>7ge}o%p3OqN%EH{Q_cd$o& z`pds3EXwhE`|)|Z2jEXD7QSs1tHobsm96IV!*NXf)*e- z8L0^Oo7*T@7$hNeOy6Kh0?Ln*6O{NJn+JVS0=<-2>={?RVVldaG}jO>rdQ6x%c~Xx z1<^llbcFfbWozF#rXK5TynwVZ24U-P#}G#eyNMOi&?W;GJ{-|J|LA7g!10jX$3GR* z**pDnk()dg+2|xI6?ob)wd@X+LDWt>JD79J7_}EvfT`wGdR_gG(%J%A+kP6iJAdZq z-2tD|M#b}keQfpp5W@VJPfphhATa5Adq))0tgo3`AzO5LNndsKT#%*XuPDjU9bMT>8M7v-&eGvN=+w`HYe=xCjB2#F0Us8Y zwDdFf0h7>rH6+EPt)n!D&5*bsOHRvbLhO#uf5^|f`KJahUT-G%w>(nxQ7+(FjUPC? 
z+Xrwt$ng!;h*D~@QkQaKE?OjgbU;II6jt#$#dYjeOeI{A) z^+P($&jSl09%m^MtbDWzQDUo62^vPBihCHWJrOosgW1p1D?tBQJ0P0i&#O0Sam7}w zfAAIKj|C@E?I4XT_SZBH#vog63F3>0jK(o!}?25%m0d<=Q4lefObgMU^1455 zJIpsk&4>6>GBI}6(q6#PbW7R%f<3TWwK*)=W23C&g~XjX1>0sExwU?TX|&LZr@bzF3OaX~Vwxmxt}sEDyMO5a$f8F;@ zh$rQMJifKq0CT{fmW!ezPz0dZn7L6xvz8ap;7Rc#0Jg0NuX;DLpW0q<5O15h+G)*> z5BjUe&8_Ug?vB=7cQSu@Pt#`=Xj}$$T#sttv_5_Xx3`3dbe?Ez>QU z=*!Ctd$dB<=J(=1_U@8j)mObfskLD-trst9VXWC7kZHA|4!$TDGXK)qF@LRoi|ZVO z;1^Ru+4{3zcG^eh^y{KD$gXkW7*xTAA#2B2bngKdVz7riyL+Rg3^NcCm*)GYCo%9+ zg@#tlnI~@#-h${76y28bfCT}ZvB*rADy!aYWQ3tHCx1%YIL6|wQ9*NY1sB5cUYSE- zlQu@jN&cp~hQUNf^Oya;vGJGZ1N&jm_G(aMq^sV0e}C}A{9GbzX3!WDV?sxSIb6Je zt9U&W57l!}tXLtegc8tE|COr1`rZDgNFHjN+ev910uv)+%&c0AS&4uLA2Co%hlgu_ z$67qFva(t%R65qC0g%c3^6zjI0apw_>T}5;))+PP9^`YF`Tf-DBG!9N+1fO($6f$! znfpFkuD+q33)OGkPY5~)9Xh!~Fld!<@3;WUk->;s%My4mnwylmzK#rllYu|jglq(W z^oo_+6jR}lGSLFgttwu*u;wE~lO{~r7nSCC4_QLSgrEecM+|c(Aq|3G8=<8%qHliC zFntD6^069(l4K>l{3WK5JxAh{REAP=5qde#Z0b*BeqgB7RTzn-&-5-ILL&2>sDe!% z1}>>s1%;(fB(nMQj|Gx1eREu|vC*xuSXTl&c~7OZGz^6=!^cOcc>z68f=zh09`E-b z^DxZw>C^Opq$vEv*}nE=qxv{A*}izE{rE!dh7UpI$qS&QloaQulct3<_E*5Fx(`4| z%iYQiazIj~*>b&Q6fR3(dpjSXHQ6|RDo0RG0ixEy)P9Nb;48rAGiQtgua&U~YdqR? z-vO5!wb3S^=?#)=t?<{dZ40&T3#?TXkNm|N0?h~FR^Q!Xhuap1Wjq99dc873$0?bj zv(#wbzoeM(3$2sS{Ian;mhBB*)Z*E!$k|SZGR1USqUzP59?pcMF3kAKJs)Lag}4!c z>q$y-uBlnjT1;tWa4-L#8wnH@)#&9$_)nBbnetbqfCLBirf*xL{ha;YH*WVE0m*Nx z69bB1ix%MJ(X&zO-7)yfF?bDpgZRCf>r61viI7eycs#g`f@aU#EI#m> z70w%%?ct;7t#GI6ahwNbvlK>E_5Q)ki=ruN)JyyXXeG}SD^@vsYinymO3c*m?Co6v zQx?@gaGHv$>gUa1jQyEnl)g};1S`9#txP}z5Y2iD_!Zv&ciA}rJlpd7QLP;v@wW%* zJ74i{j{`Kb)%c!mM&*(vdaGS3x2|ZFbiX_aJn(Lor9}h;al~t1gb>osi}8^%^+=UW z_-Z7Uo7xV*cu^65$+1`bH5xI^0p*J=ci6o^8)PO#8zS<)PW3DQ%8FdIJU1eUES8e%RmK$_@Mf7+410zmY7sVg)3#2dHiPbW60yYcReBxO+SYGOWWo2Y;0~dsrKJuq%o0r`BbVirJ_z4|6%xX1iLT=w z272i^6XSAjMIObbd3CHZx^PG38Jknj@rBdWf&VUzgrl!ILs{ORyv9n{`%l2l3hK2?Y z6z`0tT)jS>C8VSb0X|Fej*gDTVf12kb-)^?l27j^ux}TZkdQDA!%%f(jHa(t9n*a$ zj&;6%6yZ%``K9VuJqggxVn%8Z3m76+8>7g*NKG}E_w~ZG;>Xs=1P-L z3C)ld7cX%=j95DBdkGvscG^%8n2keO;^Bto#4I-ufv=V_0S2h*+KAo>gQ4p$JqJ-{ z_X(=M5n-fk1}L|Hw~xS}3pbi95qVPCM#SS{^Tin(7}mASk~UWTb~ys)RT7x1cW0O0 zBjvT1xB5`bOT*Zj@G~+Jd1)v;e#n0EbAtF)r`fzBN4xu(c$9!AInJ$2_$ex#@w#MA zjn}XQL*Ln-80XdtXM)_gVg&!C(^d)dHxfm^!W5-XFY)5XspIl><6*-t_kt^wvdPs} zbEPm5MW!D@L|jf+@!plq$tWlw1DGBxV4f6w%LMCdeI5^rZ5FEofDm$YbaedBMx^N} z#W{ZIdL3SlK+-b18x4}t^=EwDPZ4thshp6N&mAD!m1Jjh?3{N^Z|eDy`U3=$y~{|h zOz7p1_Ajt@&3N%gH7l_ysBoemp$z$XGGy#}E8K^6WBRSw-&BMNRSX-|&rFJo|D^Z= zJXFQRUdMROZ}OjPMBC2NHB?5dsc>TNF`q`Y^jngUu)badcZ5Y5&PD;W!9Xq^vjxob zF)i%r?@CB*4T9?93Q)DyM^dy;6Cu;}`&KdUU_|Rvz`8Q*3XlkkbuyVhbpd2omuhKL zh((3A<}^1Z**^WB8S2xmZ7U|>BFD2X^Yht?;vcroMwNwIiw5j74+R&`H@mJj>(7g% zD;z=Jv-9nD^Y~Y2UsjyU>1cLA>)-jFmhLh)R)$w`s4mYjnCjF8Be~u<18%>&y}OP} zvE7(7+-Zt@2UnFgv8SIS-8PmW?ap(abYil;`ruP1plx~iT9@N^5&oJvUqw`lsM+TM zWDF4S25Z}HbY=m4tCooqO+8Oaz!%60QexJr1DNlFeI~{P8lubARTrmiXSIfXFaUwu z4`ye;X)wUBqF^ULWJyG6gzuV8WPS#RW&-0dP3h27ww%#zdZD9C8h`D>hTqgzJ?%$7 ze_+GsSa$zLU_aITOy_ZDn?-#ZqO0C;rvEq8QWa}qUCW;Zs<)a--`JW3iX5R%c{?5h z#b<^eGp6$W1}65EWBLiKk3E}qH7<(pjCLN)jb?ST19aJ_AKAole*qr6Sl}$w+WqxA znK?b3+#??Gb(Q6oWrN>`9>0?zse81C7^g~&^OcvC0MQ10ofV$Xjb;7G>^O?B)o3TK z9GoL=1&!sg*#r=!`qRCrMmCIb%^RZH_3|ZC@WFJV>lpd;@#=uvx}z#Y7-wU6jUK^+ ze>l|d*F#oP-h#kzfnKr0RStD+TXmGVJ;|gqE_3=gV%v$mTwY@5uqqSjX>SCyGQc8|X5!yLpJP%K~oc?dQE8G93r;q6k0N*7R8_ zb-iQ_Cb-jR;B=3rov+AZoh&zH_FqgADl~6mLvpRY&yiDZzjhyJC8MDaIQMHAr?v}o z#ObZUtUlyPoE7rogH;oqKWmqT%-mcq_-UCiM8Y-l%hfIha3aQ6 za%Jgsm=*eLk{2LIwaYXT*<-e^c}e~PfA|{P2ojpFYl=U!+4NdgR><^;?e*XQ2GAC6 zr(?x*C7aC4jugdl$y+9G_((xZ6T=I9IV#HaeBnj~K{K?8N#6@cAF`NRp}Jc)!W 
zKWfH|d;av|1B`%x^&>$3WwNTmXXxU(+oqIY)GmbuS0M6e80d3*0$!8^yqOY?LkSLR zrqy;tKDXbuioP~r>yCM@+jB@n2jC?b4w?*}XcFVT0ma%Nx zE!(#BfBJm?zjL;oopsiEdfqSG_jO;;)*b_Bnx8>*e4Y&xDpM191=A%Z+pUCKCtP;t z-^5l{lfA`F8!fe+&?C7qF}%O(dF83s!2f%Pxo96zB#)A@)c>amzHt!938VLxYkvk4 zJPgx;^msp710`JjlgG&xRgS1?1Kg8_O&Iw%HPaNm%rt%3I^nq@tA+UXxV`8Pr<$*! z!vkDr@+IwM^t94G`Uc(C;248tzh+PMjqks6XSCMV6r%)UJngx;!{Udr@)$mU`<$%m zo#Ui(h{bindAJ75D$Lpq{5^dNN7}w511Ax!>3SIbKP~{P5l4ovMy6b!Dj-k=N>l}- zP-4+vKwBx3!eH`{&8O{o_7~8W<*kE!9oh;yu?ZyfMPw)ct_W)F2{tM&c2Z!9P_hZLl}g%*`Tv)hNaa_-H?)FHWmFWWrYFLW4DZ~FfFYD`HJ2^AGU z19MivKOy@a{(SCHWdKCAAUL58YU}IEqr(B1_+R{q!&T(PC=L$_F)iZpB|w7n#xB=e zo(6Mb;B?OyQVEOyD7RwZ_J@r_QLLCWijO7@Rmo$DPii^ilXbr`@RiAgMctoOT;zcN zleb|>`M)76TNogK=y=?QgF?jl)Z{WX>}JD<8n9SoazVW_E86!6M&0A%^5lo4q!-(d zSTNa9WxAw;Mf196B!HDT%~I=G|ja zIMS2|1Sv65OLf-l>1Nx#8IoCYgY;+rd~tVxCPqHh3LVL<)w>rTS%XxO^*ZC;o!`1RTD=_%x}n7W+S+dD{#3vsluyHL0}ZS5=losc6BFm$T0ypmb1@DdI{YqU zay8jQ(=#w)-RZSKo<~Yr8xm`vAU2yzqTa8CTMY7jliDz)B$n-Om0~(sBtT(%#mPa$ zkPTPG&RNBTiZMS80|wsy|NpUpi*)$)>E>kBr%h^SW=4ts`H* z?dP|`_EX(Z1)P&F9B2V+@Jmwq9Ky|7q*&Y?Uw*5suxDZ%CW1bh;EXnyyMfu?4qX@g zn06qLIkg0)IKKQWsOACe5ZiyG` zhPB!uw>AfFTHQ*Pcsx673UMHv7L;yPV@)q0sGW{9$1(mp4_t3)k#NNK7Tm0cKyxe? z8DpZHv(t>oZ}cys!q5LPXkGSkMP${-T5rGonsNQvivsSS4QYO0?^4K32#e+tj9sAZJLZ;&q)-%Oo>g|Y3`H3@)_g|d)5n<_Gk<^AqXOBM4tm6)K zU`#cZ!C6()`&$LDSql(qRrEit^IZOrV|zeXuke}UG;$> zxFrZ3hS3P0B3F*fflH_1meO62gjAn4MI*qVQ6sw zU9W6P#AfA*OziXVEcb)4mw~(DRSUx-m+oT2FgRKs2FXgQ2q6DGpUP}!`x~h$5OvHT zETqkPP4DsbSm+>GWa*EVc{)oNG(l^Khj`Ojk}Hyz=PQ zom@RWZ@d^EAQ}qnCR=-l`taDn0po+Vx6u_+-#K}lq~$bNWKZy4=#=%S%)szZR(3v& zKXBqsV=@qybKMC-0D7JEW!*M6Wt z}BM`hj#{B?Q%^KJWhFH3;Nhc|@ zKqN~d7~C|ciDtzeR@n~_({j3&&KoP8W}1sA?(XJwCkwrK1_#Sn>dEs`uhF9HM62Hc zasvPVdy#;QLKjm^RQ_W1FNYGE%>E#gZ#TutSV0rE#=heg40%}Xx-Zta^1}+f-voE5 zzA?6VJm61xPW!u>QXtONs)!nrAw;AqTS0i4M?KM^ao2nhO{$lr)R2_qM1X+En{&J6BY+>iDwU|ny z(Wv+R@LM+vn35s6+TSFI6|Tza++mPzA0++2?*ie)unmPO^qD2F=2(AbIVPlB;!x#Y zlQO$dw&+C@%SDK;pmgk=c=kM}@pz`J<)-Lsh$1s-nsBW`O{c4kc}^$eygKj%zkl~| z>btdFA;VQBmyaQ;f}j(xPVe4(g=@|I-^-bEciu$N0pir3EWw&h=gyc8G-eRa0d4M@ zQ#(B*Du~N7pATWptXf=>?pWL8KbFHiUG!l1u1Bm` z@N7Tleiyge3uBCyO1S&pjB|e8>t<``z}#I@PHu0UbED6SBggXLXS;2Q48I#Q!H6EBMrvfw_TSg=t(GK_F?(x3Njv8C1^zeJBlfz`*#N^niyC3 zau%-=@ODZu4DfAt{|!Llad|F24k8-=z&SI6izHGAsACJ1v*f4z`)DpYnADK$%dkGl z?)sH@&dDwR*O?8X#0Xon&TA60_sA14#KdO*35*ve1Q z;0!VTB%wr|bDVEhQX)i>oQFl`m4T@<>%xvjdFWMrsREe1Vw=SCKe&2v`uA2E zzeyY6V0fGzpRa8QJh|ItH?)1THf5%O`4kb*kaobWmMvSnzdn6BseEF!KxXy4lk3l9 z_Ut^@JG19hLXA#qT-=9Uq!kuCzj1-dd^8Zums!_37cB-UeD5d02Zr|u@8`YM)hhdb zoe_tBV^4`uC#tLm@2=*k;GV_n6Rtd8s66(;QP#b~9{o^Vg}6EaJ4zwJ&E+G_}}FZU-{966rHTJ5>};pC4;kTu%}%arq)z?qB4&j@p%!5l-IZ3WB=2A8JjLuLwkXJHQc{W~K!@eZ8Xj3QIqQZA|in2(jhO}Jw$jw@auo#e5 z4I=X0l%M4SL>kRn@w#?w3(OrAJGOV|Xt&1_zrbmL#qF%M!f$6nC^_nffUnY}u9%3b zEM&AtLVVRULOnE^SEx<;jV@Bqmsazuk1rP<9u5HomCb=^WsQtA@1K_v%H*@f!coXT)SgSf^@73jcWh|T*Jj}9 zqGR=Ai-F@@uWNlQn~hF;K%zfB<@`i>R!8lR|G249PY^6{R)&14-T=fjy1{zIkO_}a z(Xn;S3qXm2f`aT8bX?~JZIhOBtKs);rtCbu{ilq;2SDDF9X$*GP14Du2;Tjr4C>m8 z>j1etl0+P6yI|UX^RtlwGwAzXi{nJ8>nU^VHyXQdKBZ&6$1@$FMm(a-T=Idt_7815 zE5z<#GrH|p>SkGUaaPD0$&)p1Vykxs6Lzk%NOu?7Z$i31g+ZiYz7iSh>2|;sLi%3qU{+L1q_Vz3wbPk9K`V2U%Yv>4)+8m z6klz7zroh!bYEuQbOAmW&nHz2Lk$Wgxw)H(H+8kO>Mc%3kjHXo4Mu|qbQo}eynH8` z=bVU?G_d~{Cadqemv}Uul({(-K95VGa*4dLv2pInUmn&f`f7r;(6xwylKf(y_z{brm%;(pzB9e2tTnmyeDpRShpq%>f??77gSducfzVX;9zW^K>57csp6}Re&t|FV z>G}3vfme08?s=h;yFO?0@^Ddg%}qlSVbB{g-Rf!!7_W(Rygyq4X-1L@l{&Bp2$gxd zy5FGcvicIUoytEDeyP$Q9Hiyc?5qSjx$ny1m{UYncsQ8s5Ou7+Lw9;{X4P)|o4VfT zG}q>YGF-rx%i#Dd0YCjcKJ_L5t1>n|-oe<%-k!P6Y_izr?O7Y>CTUV?9421rP<{=o 
zK}?z>B_^iJ`k>zuwb4}Btk5A|Aqy#Mkx@2jQxDaK+QO-tmfdH z#uKC0u_B&8Fx)2&l{pNadJw}joofLdvhK+vcSG*|Qcv2a>u9S`Fg*nDX)Uc@N*pdX zg#hVoNy3Efay>O7|Kld)sp1(sux^2?5%MwM4wuu$c&XN;T&Fd8Zth2XLPGOa2re)= z#s`QZrYHX@*jIOMWU<1n79$b(;x!&SoyQ(`@guI&FX#P=iP(gXIkzG#TdwwY37^h= zXQ^nVH#go?;{Pp+!w6+?SYrWY#?C;L(Q1>uY$}s~U!N$*zbd^+Lf_Z!&ob56xDaRS z5p#E6Qk&s7p$=zEUJiDpHDisISAmVINmsr#m_!kGwb))YD{8t#j?PLeOP9U2B45^T zeMF7~+*Yx{4Udp0Sn&1OcS$XjVp+J5*Jd|*`@;qbqGqhSO5(whmZ z{#Tl68~&JTLZDrI;~|uLi^j!$OGxzqojdJ9CB4gV+U!2qp>~q&At8mSo;y{Mv$C>^ zviUG>TSp~xE+-=)2>>8j?f+K#{aHa%Q|jla1K0p|n_ZyC^JNk#3_4A~fg&j@OUA;2 z0_Y?F5&W^J-53F~NNB^F#pOIVf~oQBWxAqMXk!$J>gPXl8kKQ%aKD% zp8W{96pWCx^p{U*FCd=ZkCPzS0t#F*U{`TD?jxa~2m?FmM7HDNc;89dCk>pilh)!N z0s%fO^4-smjT9pboci#B{0{%VNwC&BNS>J(hJusJQPZdaWNwVvw7(gOREx3=PsjKD$T{ErR>GRp%SJWWB%6;ML1CWkcw;>N$OR|nKXU6u4q~-K@irZN}m=s zH-?Ye#ziu(n>!sc6Dy5PPaip5sP>*`8j=gXtTG0i7iI#Qr?SujnUHzWkIfc@U=uSJ)gSv{p>@X`fp0_<5W=s0 z8YNfXf&Kq8rg39xcmhcMMvKVLL}j08lE1EYen0TK-#fsgvW)t^wIm}A6!F88lLALx zbx-i2q|_xznVDmNJBIkhdOIvUJkanISUA7wW{7KNAoMj_1U7I*c2uP6b6=+hh^t$H z*;lL(``C`5Q;%QP{kz*yX|+fP00XcMpBH*?XoOv0P+H=6FmZdnhMRB%AL>mr$czI? zuO*|j+?`{o|85(<%YcOTgR`@$=oV7M)%44~386218~1C&SHUTESg`VI$?B)eX9C9E z6xif*r1I4>nH4)a9by08iRT}*?;JwLOc0r;9l41T97KiCM$fIO*QUhUX&~n3@R3gf z+m#Q9h24Z3VLK6U&I-gKuQ}a2FANJK4F+pBAacSXNE=T!g~VY?mRl)u@eZ7BIjY7~LwcU}a03djB=z$Hi42v`+_EkB1_|yS_qI>&`cs z!itG?%8da7O51lW)Qpa?4Og!JOkW(wHwjX;O?C;EM;okLhp9uROvjHF3z2H^O^XYCD3#6qLV#Dfi)vx%@s+P{tBqZOYS&1Qoqwsh~@qM&cY7fVe;yl73) z@bxe_DeB^!sFwGp&xBT9`6r_9SGz8lx)6-@<1PCAtI$NkM-CI>d5>v5cUIQb z9O%Z!U5bl~ng64#4b+gy^cIUmc)$AHj5AsWyU+h_T%Il}j;$SuZW6Iq`Mm~!waE)f ziW2QdfW*!Lk1{&MU!PE+H9PednYf>fyi|@pjys5KRrljKMpQa{Wv`HmC+aJ%E9!b1 zAH89qeM(ZbWv}Kv-bgPRp481Q23r`+fD;*)UXioo%wn|xk59O@qy=8oU+D?>@P(aj z#l+!z>y^bEUB5vY}dP9WtPg>k%e~lQdLoLSAP|@*Rv6wJ)_f*8D(^{;3M?xp7 zQCk(EcZQw02HL8{6i2VF#oVWevu3`{`!8exVS-F>_|hY>{bDd>=)zt{F2*htX#0U{ zggqokrby@7LA1E=E`?0~jcCgMoC=wCz8*)hg@EL`J235j1m)QwgN3sN`!B$r1M+kn z7*d-xwYl?4K51%VcEtBZ@Dy^tCkV3(C1H8kavqT8B8k9(cR-UlK}T2%AZ1ECNhvbA zZF<{I0j7R^6zprH0Az;lOksZNoMdPIfuKtF9(**}IT2&A^a%a6=%_)xq2qx>S#`VrCsK=VMrzv%X-2h`T)#+?Qc{hnn$fCbOL_ z8eyRP2+i0yHZDP7FhiVWYiaOkl@|(?+a$R8RJ$%D*s*i}?5J)rAx@%mVgIIzHp1=f z=-_EHB}yWlqoFCB(5yqK(^OIAd5dn`p`QJuy7zVG4^X3GVw#_Yg{B)ws0IAvq|U#BxkU!`~>^~jUS!3eU^k!dvw znw!-#EUnIkZ-9mk>AMgp`wY)sPlD1laK$1aA$29wYo!8L$!A74uwH*Uy>r#BKxQ!p zCexGQa&%`Bl7){$rp)w`$(Jy`_2`XTpE>mI*B5{qC3ItccWy4D@U>H=YotXDINg?< zYU+a(L$Gs?w#1PW9EF;q(xxo;#`(Z8_D}e3M!JK_neL}9rh!;vp`I5!5ijf%Gzd^BBaQ4{1`^FG_m_O-^S_x1c(Q!; z^H*`)FzY`l#oF;?{WdMDIN2dwy4(ptV%T zqV)W^f`S!55W=EMk94z||5`45`u_m*9Z6|v5}>9nxH?tf6M4&% zAsO4Cs?ma^&nNCvsO8hCrW{RC&1WeY#1!R2tubI~$|74Oyq4yJSiwU}MBB23g{q3~ zuwq6-4@ewh;Pd_Z(61pW=Rg7jvL{O3bd4qPQs3h4$?C81Vbq{Bk38yYqx?XQ zAjy>Kwkut6bw4Kjptq<+f2!8jZZ@9T5n;mjJnSTO{CcnVdi05!IF}Xj`u4cu^%PzA z%e(s={|F_t_4qxhm3)P@Yb#khndm#A7|?jNkh6-gyltSQpr(^`TR;3}1QS!7(9VLz z(IQQ^zd%K8x|{gA%5{mcZ{xCg|1RE82Y2Dan?9P}Z*aBGHFjRJ# z6#gZDL#MX}7SlDxJK+r1?6T_(O7ry=nSS}Boh@b=3mDk(LiHqk=~$&vQe!)l`>2 z+zNpYf~`F@WBU!eqd;01aSQE68yl9TISSuP-}jC^&W>sQjP=OMEYGPrZ{AQ=p~OPl zol%Rb+UrR$s7Ut&vvUH;!W`Qs`KoSL7L69HtFd3@A9rxsUM<8GW*vgUy}Djc8}g@` z+<}yYZpq|n1PG_pThDQfmqT}NVTg71=4Xs;fFDF#ry(a?^2L^*DsogWNwu2hLpU^n znT6^6fw*(kFZxl_o1R^TAsf4|RdP7}xud8|mPkwWl#Ltzf|raAzBidbdvcuT*6Yax zSo$V^0Y1K;cdL$*HFjG)2@@%lrG5K<`%d5gj|*_+_IThpiHwUo`jvClud~zk+fJcW zf*p5OBmtjh(HxgVcXt54_vXt5S?hf_W{#9NzvGeisWqTi1cJ5VVq*mX+y@}vI33Ld z1NayU4vxpb85Y~&l%&!=S`d^ZFi!Eo5So4Fo=y2u`)l$owJ$`tU2wybXK}MI$&&Vk z&jwzTB}iOQp9u|v2=P?J=Ls7neD!ZHW(9jjTuXFpSGN;y=X=P%KVS3ZB&O(wdR0i_ zIe3NMVtXQMjFYM)37)5=y?P?q#6XacslpMG6I&rGa2~~l8Q0F?piWR&ei2pusCDUN 
z3=U?<7#J845?O5#>et^s>N(_1`02}zkAR{Y#F)w5q2ygk)ShuOe{#13q1L&Ef z^Ff;8cx0z92=dn*bCbIKxbX4iUxNd@!|22Ii*owaz@*|L@DlDailB1gjvyQV3MqwL z)PUKKo-7&{y1w6#FLDEN4GiHfl*sv`{TC*wefAqO?4i;Y(y^%n(b2v zbzP%jy*dm`I9Z1jiBO*ZuB2qgmZG?TGrrt0v{9Gno9bEmM zbFfLqy^-XpR8HbRk^@8_Up=wfR~%94S#uDdSA9HM1$FBAui4VG&Xhb075e`&_s|mh zHb+-bPF-&`)B~4tCQ=@XY^&};PT%g7P3aUwX03w6f{^g>MYN5%zKeh0UYOnDtIbl8 zfaGC^OaBxL^~nN_4c>Fp=>S)EUwa(dDQK_Urc3sEBJeGSp5BXCcREK3ZJy!bY~J`@ zdA=N>nx#2gXaP?5dCH>g1!w@-dIE2^sDLFc5gQv?-I8pl&zl=Sz4`nCW9KVNQBe{5 z+PI8rN%mAj_yxm9y(XnJfta2A(Dk zcy4d6NtQB&BHwdx*sSbK=o*&+rKn>w?JSMp_f?(D*Ur&|a zQ>JT;Mdu$D(FU_WL?If2cLUO3+NTEE8h(2WYCQ@?@1zbcFrU|L0k--D+kO?kZ9*rR!(DYvNBCkZ!fMUfQR{mcWb zR5lu}&O(^>0y)r*R1FP{YUZ-Eh$DdM)v68Wr^Bo;43Keh{q{56#dvw^sKmp&)?!e; z>b13Zi-?uAZiK_1^ycRJT7KyPxK{}{$M+nr0H3Itatmc)Vc|b?sOaeZ&I>w&0GEDv ze0&c8Rr|fD$#>hs>JJYGdsM!1#hRMVjB056p5&#U&tP-eZGxEArLDJn&Q@q~R<_@3 zx}2{L*P4u`vYH`$x`zU53mBnkzk z_7})ONk%62h4ifBEoRVPx)GPDU!D;LWTaynY1KfCcE9x%5{eq*izf5rHeH&Nt{c%D zy0;&meE8AhcI}B`!H0RogVo9VF z?%VzIQzX>t;kPevRFb1mT^okob^*d7;vYl-r|%uF9Vqu^m|!KaIIZ}*d*}VIspYD4 zP`y{Z(_b-20?!~cykFA(vSL!=rS$%gkRU0=!{bl6QapE{g#O|rNHi4<$DTpw&-Qcu z8BY@+MKo9AAwb9Q-7msAzGe^CC+tD-CpbrmlOlxf6yC^4OUtj@>v#Eq4$37c#pmS2 zOq=flY+F8?j)fC)z&#njXaPHD=%kfLyk>0a^{xz(lEh@cFQ-_Xcr|ucB zR+smQMdM?85Ab1*oH@%x8xp>;!DC5zpQCy9>GtYdO7-`$3y1fb5@Mq2 zV$UL~ks`_9jX!A+3`~AV8u{dp3Em8nnl&7lw9_RBCc4^H50(Oux#*6tS%nT0YaBMqr>)wc7 zFN}cRMoRs$-}jZ501=nNN*_o80eD@Ap}j&e1bl9Qx9Jh^Z8$P2MqnOUO;~!^&vL~g zjpuEuHHf&Ij=d^oUPiaLxfNgzF^W6FLxa#2F#ox|iPu^XB*H*4tUCP3^jh(^%L<|l`QHW4!T9HhIfaVaU_*EKG=~g9e^XFW9>aXO z-*+OHYNQ|FB9-|wX~HmTRD&Yu53I_qF6OV<2Rda%lN2VxB!oY|c_?2n7P_!$oGpR{ z`dO{grqprq#UwuJN2)M%@g*j=*tdfAG@sWhmlT6c!Hd)R2!Ti@*Zy1}2ywCvD3x~R z3NNdNsG8+WG?|KR7%VFOEiWDzJVf;M+G(FrZ?wwNHGZmu+*G8f9y8nFpATo0)Ct}y z=PPw(*vqdZdj>`RvlAoW0GxZMgoM7!d%*b`2=z6;x;pmb^*|sfB!tE3 zaEdgCaqsrj#m$_J_nGK)+%HW}^4wkBu7~+n+!USW%lz+Bl@01tIRFC`a zpS-&N7`1YW<#8*{nKmVm0kwy4=@F;J+1)VSVLu|At20gbE^kRVK%C?n(vHPFfJJ_v}tY_3j#*V(rk9zo+ZRBY7 z0asaew(jiOZ}cx;zJx_a8Uvzg^Yj6L_3(JNp!ey;4{U6J_d8XpOc@^^f9b}{zNRDg zBxBQdCEntC;j_F46j=fvudKjQ`mCJ&{8(G14h{|m0qU(ica|3r(gNfZ03US3yhnVd z{D{3xli1E#ICtwq_p0sYai`4jLZ@5706?i&wI9THI$ewdsD=7a43Of1eILfIjjS`oJ4k>fxV>zm%E=6Bg~n)ttvXD-{93G~D#?esy;IA0wD}Yg zGOllZx(++bPdj%<|JHnloWC<2Iicg!pSv}7)#}kLy$=%p82XG6HB}b#qB$U+3bTI^ZRzGwk;#>8Lvx3AQfmyeqOEf6tQRZ>goQ!c{NMQ^#a-slx?Hh@ zKAIzKQ5LV4-IiX9W$2_YY!&LGz5W6iS1-wOXtU{xj2jO15ZMqhn7n?wASbTpB@*#* zkdvK51KFfl^QDx%mB(YWvne&6@4jy5y#%jjV=58>tNj|S>h%s^U*Mc7eNHV}`UnXu zj*1GK63Zh|p(i0Eb}l`{srO8>rA@BM2pXZymadXO=s}U4aJ?w0S;cF0LxWjMSpQH3 z;=bEM>!d@}Ss!V?bDns*pbuP65~QI1EX$@XZgxCSmEoDEXJx%fyh)+g8o1o*W&CdY zvvKZ+UXNtKw4{;}3cv-}YdNVj0;ZvKdL5ai$|Wq;%WS|M;RoE+b!}S_4sqP`sPIW= z>+Ko9_nT?=bk%II3U|R!{F9qYWCN%Pfg6C(>k?E++NSgE^h@r%2`DI&Aseq9WBCe< zz+~d;{Zv2Ca+5?1!R_I6fp~*8tB4g=rcF}?Q~H$Wy_j)P?}RmsTE9mAaFF>LhJ1?e zioT%JsV2MmY9ZJ1My1f4t=;q8WL*E@%YP&Pk^MI+jTVSwiEb4Uh?=jAGYJsDd;r$@ z*lBi$j?}pq#b@#XGB7J!#p`3m^!#`R%{QYvh^&H{2sECaJ_%lHXQ{ zv+s8pgp7+Jd6NZg&#QMqIG|1=zgX4V?f8yL#X!p4KcE}l^jJ}jAqy;Q9PVvQ?z%NBKHxnn7_mnD`L8<*!E7$0@k_l(co{O=8~)8 zghjC?B7dyf@YW0kVmnbqr`eotQ_&04pg~1DV%&eMLML?tnR!6J+%@IHcQi+a@@rbU zEPH|JwF5me>lxjGC1vB_BPEOHA2O~DspW_Kb@|t(hn$C*Do3lo?WF<$t-ml)#m7lU z;F<>y-`eN2t@nmjLBPhHdMou_;daM~+y2EN66D9zn;(AlGkoz34E9%cPce22QNcRr~tt^?Z8-RDn|(jmRh{ zJ=TqDA)|ZSfDtpucmJI_rb`^5Uz}ts4ZT&raxajiWWk+<+$xRRC3jU8gjBghH1xVA zDmt)Td||%TJv4^#7OVd>|3<>Cs)Dcd$=9X|1z*qJ#KrqpM0U$9icQhpCv#{ z{*uaerNaL@X?2f3CJeS`;)&%uh?M+h4N^eRoXtqlIW|V97O%WZI~1St#)GK+!TGGm z6XbgvW42uAdo_Bkli2DkXm*i9c;m~7x0@}Yr{7nN!3Hc3Y5{M#Ys;9+*8{D`?Y9!Z 
z56cqkjs>9z^3wOAbF3F}O>`v{M3B;RR(KtUCm;s%jI?eH^M~xlBf^6A9d617Z*s?U znfq4o!?)Dr=qeu><_}y))X;;5_*;sXPKDKl{PKLdtP@BzJecZ}; zZCT`T_ty%wCTB@T!&lm<6by>xTf(tBU#+oqM>?e*%)OdV`@GQ3oICA%0MD%R0jn zgz9PclgO{0=cc9#I1>#Y!TIj^yKZlXE~9SQ?Y30IQKVBrU5ahtB@BcE<+t!(XrS(_ zUY?ypDj$Q&^m4{4vX54ucT0>5@&6o(auas2WV#dT_I_aScUgLeurTrkMn-6UYu%UM zYly1Dx*zdPv(vFWMUKLHewBfs6us_AJhGCdC$s$EsJ$0yPo+&|+Jg+aKM_rBe_o%d zYP4+SMw9VnEF84h?B%j{<%xu1v6)Xvz2IYF4g#_Rd2T%}M}*KoNH~nxw6tNM zex3ld)Zbq!_-R)Ig9&;lcl-hEr zh4ymxf42GQH^VR7TCO5)5VwR7cLnouCSr>3=`E;i_rFXYXRGWl!qlQOOqg(LH0ar~ zuE~wozr7bOn6nr#V@o!lLbVM}U_jBSW>g(!bqmX$=<>e3JDlUSCam$yTxnLQ2liSZ z;fu7#5|v{ZiE%>-sn-$k-dq`q6EsTsPaF0YI_G_Ky~Lcv6k9C5dB^aizX_Y)G-^G% z^s1aL)ns{Nn=FET4T2az!d>vw_r?9pf09sxynm*5-|e++-n#?Swu$PBB?1Gp>X2Ub zGp&!0T@AI@qceM4&$3!e2&rDaM2sYAF`z>x234mYAWa&scprB&*}912>CWR^0wNG!g>Z~|gw}MooMw?voa-^Z5pc)^CqcpHePyr2x@T&AC{+A@-i?ALb+aE1v0|MX zIiE4a$b5z8z^(^krukdClmKu6IRL1QgFuN4v_IF=5`=N_@d5Kc`;A#jNpz*Nw(dMM zfaom1l=YJkvv_vjIj8&mJJKZY4LlI)0EC(WnN8T-PUh@)4fiZKc^gRq_i|*^XGBl8 z7QY!iOzW_1znVNC+h7__GAyxe$&C1GWbhP!kZH4UFKr9N!<3R^^}TDVeRfd zVBt%CPZt7sA;q!$lsZ=R(pwGSx3+NTd)($oaf6L^f{cTq1yUn*7AD8N7ho>VdN0UI zc@dBOYws3wFTFcRPak3!pG1=18!_g7_FTgIW%$*avlB znoGbqpiwXdO$aoy4e#ik{qUQmw=L2u|LuO*dj>1h!s_xeO*4=+vVY zTT~3|1=T^|Tk!(|=jC{T{lo37QcRVS8XJoQWTtz~JOSzSt$yvLYtQ#*WlG*u(!ozR z2cOOe8-S|&C*pB5U|1M4*c$|k8mlx@qChxidUK>!UJxn%Q?9Ft;z&u!2C)whDX6pw zY0Y@x(8YfRQVGS`oH4onEYbBSxL&>VdNKK2jFzFtb{Ov!pIm~%({S7F?@6Rx;ud>` z1;#?IIEwUsN@85LTF}~0l0_Syw=ZT=VDd4cuiGrrBUwSv&(p<<@IpTGo%Eli(_3yG zibuzs&9X`7kag{Lg-x0WCD+p28UR@Z02}vBf9RhrxXuBcAHlwb>WU2FKnw86Km>4s zDZqr}r>y`JQ!J2r@fl*v>U{j;_4V~|wYlyK9_>NY$_{x|VdR^J`(xv&XthvoKL?rB zfoZufok)|8d5ipE0HIwn`i}fj7$+43Jd|B4ld+khMl-~0F}uWaqABaSsj2)OpG39$?h{+Yr0tsdjM35R*y$76HQXh072BC`{>2atZA1?%1`s<7Xf_udkLWUnoWhS~ zEEqgFauuItOntcq$l2^xZ2t2hcFjz-kEpZDNnu7Szadw3v)Z&p3}nr z7Vew#ii-eV`}58rD~xSBRnYkzdU&~s1rDA(=2CFKF=i5y{@PNf0SJ=*F9}O zrw9BRDXMVc<1G2_uUt!jUrB#|ze@3^H!k2P2;dXndwO~pBn5zH?eAx{m?5)izf(PL zyOIACoG2;HKrX!(nLkT#S1Ca75vrb}FP^cl75oZSTNq}bG4WMp;OaM79HYPy4?-f? 
zjOjP$O}9~pxvhrbwTCrN2R%Rszhsl{YjVW((evKc8qgP9enZk_Oym(2up|e?h&)|#I!FxHSrRV1cHYl!1iqX$xG!tPfvdoK1p(Zz5(34SsbGOsvrT_!$1>@g z&g7kcmYl?A<|}cWD;OF5go2U$Lnvfu{7uN^Bpf^&otnIDE3Rn=)!Sd6nCW~O!LNSt zlFc%z#2!`KnrAC23{0*!)&)dS6-<~YN_+!mN7HWp{OWi&DY=G`7O`YS1dx0mF4r?} z`M=ZXmw!GufD45P7z`6$ZhUl4R7hOd1DEIzO7@vgIfNnuQq{s}i}ML^EK>eZRaM=) z+uAH2+$jw|$3rF{m{_sx90ySA&-4A0v<+C2W5FL!KlZ-9ZuU0Q#Fm_JBcz0ixI>^MStUEt{M!##$Zgx056c&ByTb(`k~7t|=H9kh{^BaET>YZ!RNjUvz^2EaSC;R}PrrQ$nvzE0On6^d2J6jf1;hzb91&+dT% z$}Zr{2GwB3+x_v8eN?YZ?|AwO`~leM{oL3%CJOqTSMk1_*0tTv$jD~%rU3_fY+_ z$Uti1b`nv2)zvyw+O}&MqGu?oF3$x1+cJA}k%4G6jt=mi9gvyRAlje5Xf`IfwIShX zdUpH*hB{5ZfxCxlMCx>-GpD|u4e$;N)KN)R`r|)rybX-y_)P2`7IS9h1g2kS<#=59 zyG{VDD`2+|$&F$*sLupw5_*hc_yz#>f!j$%BNm?r8#6OA`x<*u$Wcq*mP4H4_p$C( zFzHN&fHRJKkwBPQEoN1TYGoL`>}_dd`Y#xPRr>n&X$h;_O4kI-9!l@&MW2==Jfc-f|(j zQm1tW5Mvk`0y)Bf!#sABI57IqnO*yq2RO4A7Ps0-10pw|y14|Rm~Mc9B%oD!{4l%d z=*^^=<&XBwah#zXzU_f;Cl!k&!Kl_B3~Bn+PJHp_w|;-e@v=RnDyEv6n#MvyWm%<( zb<@2rlBiJnoPmb|%kZ%Kx_~fps(+qb@oLV=h*-R%gTsNbq=S#UgMe%fKYnMY^qqd;Pw7zPqN`_y+reSBIq!KR2T&7~fY?VB#YHBqY(5WI9OKj-mp%d9ya2*JZa`yDm^n zjR7CyBPmX z!)A`T1Naf&xc8??857WAw7~vL^34L7LI8f@WQ()@okT?J4TToYB1PU%KM z>FySgkPhiay1PR{It2kqLFq=i;n6AGT_W8L|9$>`jLyi6%6&IB&hDOr72Qo!TAam) zP0r2Owo-bHZJfL6n2ZUf`v!DK7X*aUYD`_*4lg2ze$?^BPW8ilAc=Vq?LWOA8^`IT zXk%kjDPyjvt4mV8*{RlgG0cC+0zBRpL%|RoEp0ms3cz${XJ?^gH^=KeKP@NepaO!V zzE_6O>ZU(DsT2#otZTjKULp*-0L$zvj<1IJP@b1*J+CMg1;H;xr48ZG3Q5>YoM2I_Y}g(96(;YTyz)CA+o|2@r~rov6WDtC5qd z%gFAxq)>`DsQ-$xvlKV@QQ_h{V2)GVowD%N`#~Tix_VB#v8jbHN<3#`zTh%=-t@4i zi2J3j<$E+0<1FkxuH12xpW|c%sp+yag~)R02nJJ)b2ot*Rbvp`C(*2ZCiLgepzz_> zLW_A6$hRLqjK?=qQV8kj=rpP=alqi^#^z>RVxk0?cSQUo%I|F(gyA&>l6$ChV(a7m zVLg}-t6K}F@9pomJb$`#U|)|db|ZF(GBhHy0zu~xlcWhK=qt6Jo~@A+w@KdYGQn1x zSaIO{osuphUr3f@QM0@4Wu|$|^Tovwi5|06ns9D&^L~~j^Qj`Bw`|PtxzL2(MYUp$cHp9}0QEOi@C^tgg6V||!?7Rl zpu>(rJ~u4D;R*qRZ=A&Nw$W+EFysYV$X8A)T-(}I8K(Lli|{?VHy!WrI$$o_iyniq zUa9OE{J2hv!b{63du#N`)lUSUv?rzE?JJK9-N@X6U!tr70-v+ID5F)0Vt( z9c;Hq>0^=uy0oqgH}6jW$Y4Z>P(6S86-EPLRCdn7d_h8ke5>`GBBq%)bF68!uQYea zq9E%Sln^^WF#cIrqESghqZhEtv7!WmVm+GBl>xN83l8WeDoX&xIyA@+ zFTK~iBEqD3k>QI3HB)c1S*441^U=h6k!}bUsy{OE?(k5t;6iD~T1Yd}ziX->*slDU zZ{L6CstO)o_I+@71xt~i<~xR&xPy9aKUFiO8dKOVL*Zsmmo|Sn8yAAzi2hJZyz6US zsZ;Jda(-pa-akg-wM!m6))5VbY*TniN-=?FY4RJn9ia`&K&vjQW6A+zVa+z#C{lb&ryp8&3tT@{_p|kO}1h!_;9C-v!Pm zRGxs@0GyV+r7JcKwe6@H; zDJ&>eI6_IjEniuk`!mHsx{XQv*h%ASN0S zW!a)==+^jw=(^Otr|;DXWA2D=*xDLhLv%}2>k=zp1?8VzjfZu-yIX~r;|t~@`#drT zKzMbpp~frCv2S6E*i;KcNS@$lY%TeZBAi=zaHbhJ*56^qf-%)GT2XklK zvn#Y*e^~oHQo|YJ=kn6o?3$M>oZSQ7pPG z=W0M`_3NjZ0vPV5`X2ttd=(JQg26sY%rNLkB?i!X{GA=~Syye$*q9DY@Y#}w`cHSn zrkn7sCS>i0-wXLB6%ADZm2((1Q@|U)hh(AWC;ro&_3ERQtt!v__T{Q*D3yhJ*0I84eUgQn@F1QH9Sv=n zM5<6QYH=5wWujka_4HydJ=uelss(ITot&DQ5c-mER1c|paCo8CTI<}@XA`%RhqcCD zM%@ld^o9Pj)>cUy7g*KmbDBke?#%jlqB76evG6%Y%d0=Bc30P<^@I4+N&b}4o299z z?pwq2*8>A&in*)5i71}%JgX<>dN5-9skf5LPr3rR-c@{C|B5XNO)D;LHq4<_@Wtse z^YL-O7ezC6#|aw8CcQCsuT{=<2G4yZ+lEyx=@7lnL$h?rOa^qT7QZk2Wlf*mvRR|~ z*n^2ZtA&6VSy5R_!{{cOWaEupvpTT-aj$;LB8*x-4-^>|ywm~a1F9JUL89dQEGOd@J`+e{c8{Z!Zw-@rhb}FnzcSi$!|Ji+S8sjyki08o zbcVQHVc%ytRdUbH?Bf8daxt)8CG*gh|6P8g--7$JJ&DQsq5Y8QXw+}{-QD{6c$n`k zk6+y(VvClEl1fcRYlh!Tv&ceYcUQh!6u~ryp4Pt9Og~x8)h&%Vmh>N6&ZZM}>;#Y9 z@OO0vBqmzxQ!7U~Y#Y7f1eK4z#k$SOF?hwDyp|N|3K^m)eh~E-ReJY>5(W4D5K@8b z6-{EUVrHgqR!PgOf?Wf%c&!K}`L$UygDfZn=X60BQJ}V*29yRt;sh-j2DHD#qL!C+ z)qEG1SjuBmyaiEMO%lE8iBCDDu3otGjTv~S>ywa=BT1VdHlczQSMILvp(8(bFLblD z{fm|!HKyp3?}wK`7)C@{v0H(coTsfJ?^oRLXq%Q2Z%LVg%v0x7&G&`|A;`q8C)YfF zkAy*f)xXk3edUp^Kge$cO*DG^*w7xU^kQ58I?cPiVjJdiLp-cceYx3dVU=IYDKo7K 
[base85 binary patch data omitted]
diff --git a/g3doc/tutorial/images/mono_1_of_4.png b/g3doc/tutorial/images/mono_1_of_4.png
deleted file mode 100644
index e111a643d54177e5da37f5ee78c62a66f39e5b2f..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 21425
[base85 binary patch data omitted]
z2}v0V_q2HA+$UZ4r!c%^iJr&1;+tyIE3+CV9R&G&T{E*P_9z=L{NALLl-}c;lN0vP zVE|SK(Uj;DQD7rhHkm5u0=`1FfIvdptXPw_-T(DCmP5@nNg0w@wy3{g(Os*zas(X` zgX_q?x)VgM#gf3fnLqko$V8Y0w{cX8Uw)x09JcRyLO5$49Xk8^{dZ8|GqYOFW5iax zbxcT_#pXRGvibI*1K2F)(ntF7KoWDU3=1}-4TOlBb7v^J&+fw6$$H6q!GM^s60&~g z{z;;B$~{$Or-%XVHTrF1Yb&|u+_?|ukhg(ee&&AtUiRz{OsG8%MUR>P1V>v#1z%It zd8!a9G$=)sJ)lsrrDoe`X7*;x(q0OV@*X+c+epu^@ej=bioh&*@3tVDu>xcas;LzQ zP9nz7VO`Gj7}sus&NeI+DefbQ^0GR37xi@^aP$qn_U zTbAG&Pw+7pYcCi*x1dIyacScbqlrgHTgg(XO)e`d>qDtP3=)pV0;Fg_OLw$iQc{8_ zR5)&lw1GL3&=TZ$6vDD8c=tQQ=^6C-t5CjMIlf)O?N!Z`-lmEm5Lvx>ZtA0z9wQ(WyQg zPYR$$jW?iXlOElCA8@aj0lL_o0O;aXXn>I$cQ2klunI|ibwDgu8B;?pZoBpGJh!2C zbiEQChsf>HiRlqfNw)0U4 zhZ|aX77hMNgnVkd^h1K=xam~<(B`QQke^E;a-fJwqHcEVlC-_^cAW=HUes3bs`b~R z$h)h-;fOB61v*|@h92w1HsT&8fR45t4wuEsxS^ zvi)=#m9}{UGJW}JkR`$8fuCEk>FL`FeYdx_0J}nj!a_qgH#Sl-GpU5cNj44=+PFsl zu-)F*!l3B#M?|4R&j-C9E9M#JOU_+0(ZKCl7`QN|-SVSuog*g%xf=w=#0RnW?T6Jjz&7UBz?|t6u_cHsp%Eymm3*TDQ z{gPJj*X}0Q9cQC48nM|z6G76lj9unZkSgKz%48}ri`c=A4X6zczgfFZ@bNCTF^!t* zaM6|ve)RkjODf!4PorOQd1}0T^TT}pyf{eJVvBGyOol@y3m(|h_x5bDEa4*Yr%#?d zAy;!C6-HK+?8$foRNhDX-UV@1HQGB7kXj=4iJgTk>>QghU=C9$Zq5*h>$oF|o%v|K zyfcN6pFqbaYf7T($g_F85+ANJPo~MEX}qJA029sKn!%y9we_V6Kn!XG&CJf;qQXGo z90e9z!L{zHot}8;ZQTAn<@GesBT_>`LUh+(dh+NLGx7AvU}l37Q19AZ{bAzomGhMr zDC5ndqx}Ra93AtNK>HGOXGe$4Pv!aSIou#sBiGQ%B`k)X-gW|tKc^U2Bn z7bI|D+TeRS(Q3Q?>9qU84WQ<4zOuMt!)qcagn&_@oUE&>8@iAE^(h4c!7WB0K-3~I zR`isgc6F1S($+9v20TUR$D$&B_lrJKHIV|nip}UlKu}_h0&2f%X7fg+CO^o8V0ZD> zpl@COCUL#E)hpTieb*si`zp2yKNj`%^+^uoI%L%cArP0D&+z$Rp!bsJ-A%7|9(M-* z$qG;a*w%WjqwbT*#*tc*WfdP0#)3gThoehNY5zowYL(yb;OBelf*c&{CPMC{XeZTh zHgG9SY)Nt;1c4aUYr-Wh(=3G$KQ=En@YD*!eR*2N+nax|+G{9mafEpt-DV}_^$|Uu zgoL~ESTolAq`n*Yp42dwJzJx#iw)_KbBa=ND~(uBkBO$H!Ih;t>){R+lfZm;wO3Xi zsMObF%Fd+i^q9$Ta&O#9i1DJqwR011kjz748+H<93JOd-ecW4nJH;l6j-Sy$TG^bn z2)Zg83%XK6m@SP;iQ+R_A~>LYe65Qg>{y~C;rK?1Il0_E3LlYoF2>#Sw#n_odpEa+ z1hM0mb9RrP*igN=xVRE_)gzq5rY~(oe9Tj0iti-)2}nrV=kS1BR(%*9qo2GQn+uHN zqE{mn$mxL4?Ud2^4Y+RMen>7&2CVD8Rcc~!pIBK^!n;N}DNdQL*>$j|X^tV){R|5a zPrYo2j~_upc-{wpKd0p) zAZ_$cF6`-k6t2qrh51EYWZ7Ij+1j+5k2g9RInPWS)XVz}%N(YT6xi;ZLlyY;!-h^> zi-ycK)TV|SEQu8b|d7&O5o$=1$R<_Qfp*n1n|?dv$FuX&I!rL z$%*;vJ*m-}6rt3stZmn)9@*1P(_yxju9e7_Rn<4fgv;U)%-zW);rF0ObbSytz? za-nsgUtMVPNuSFuEq#vY=^`a_^nv^E@8>_8=vOMoEmlz;zJowWICA6k_gu!@{Mabt6MNFJM?mlv@CqEQj9Y>TwH zzeoQDP+-NV(@V$AdNHKw=?r&HvD}uGy|#;IFEgd>?QHcJj36l+Y3>ri00e4KA!@bT z8?gn6ytjo-PKF?~_T%7PZ#2u*SxxE$4S$pCh;yEr-~DbLXwAk}6^@t3K#YnF#7ZD= z!+>$b4t{#eDEJuM)aZSlL4Nz01R&y=gq>q!K{pB>MJ3ow@R7x%-HC7n;x$Jn-R%@c)}WTT-924RD~6dj*XcRpx^=+wAbg=o=yr;~{ zo4oiWfckS^PAM}b}>pA2ZQ(euP*8MOA79FGYX?Gd|wr@5yB z7za#s9-fZm6pUM%nVAKRm?g^mYol#&SP9n26(`2bMoh}9by0YCvPy~T10pQS3y|0W ztrrO9BDrlI$N9~z^FMzIm$(l+12v7YM=2;YHbCl! 
z04`eLN-Q72U>WH0%o2fxg@r)p_iueYIWf_qRx4oGyiTik;?vuYYHC1(<#&Cy>*x(W z^Y2YeNXXUj*H9$_7wbNgk^G8{mD1WsUtJp+Q4Y&LRa3#Aj9IjvbL*)3zj_J?Yw{GK zB^~yF!@qz3K!ltc00#69_(5j=4BbAYE{|Dmv*4FeZ$VkruowjAHR;Ks=^f zJ;W&g$YJZrY~1j)6Gc6Xb?pZt73XYPdirPAKr+^W2R+`iB?L)pSP-D?N#$18JXw@4 zOz!(&fY4-1i9p`XKS3L-`c^$@T~ilY#Ks=8e>OD>*40&B7N)(mViMLt9p_= zh)n+z6&+ol%rwvdK8_lKXitx_cd-JJ>EVkuM6L2kuqk4gX&B$1sKv#{(*+OlkSPEW z90*AavOQg02KtW?gs0wty60G3Z7rE_xdZU`Kl(F~of9opN+Go=yAk8RVh1}%RoAY* z{eJah)54q!SB;l<&3I$BUbOlr2qF=QRVG>aVsg;E-JjCkVwzwPD$5ZONLym0oPkKV zva(V`+V>nJ0S2}UsnzE(c~Mq1Ttxo{w5K~bsjty@r)?SU*+t9Bh5DT^nw#|MIZOz< zndDAR2m4@?<+80mpYixwZk?F)bSXmjN{{HyD)d5#fy*OHN=(d4EzZj`0eA77oyR`C zHOGx1AIS;iV(W=?0UdvE7p8>xI5JEE&^Tq{yLF8-~*!NC&^PJVviJkX`C z9-ZO6+RFGP1Kpb->ZWFPT70u(1vWSj6w?6q;dCvQQ|?z2FnTBOYds_9aW6Zi{iuRh z-%APBcO(w9EO$LvXckh%>VhW`6*L4fEv zuC!*t7U^w3SUsoEs#iVSgxdzG>$G-97bVc5qpLbR(O0oM^g z5@$S#>~%HW`asydS+4+I_z`;`sQ^jQD8QD>!4OcF9~|nY$~Lk4`eWP)SA*<+Xz=;+ zC^0Qdse4)?Dd^e;t$zQf?~VxwRQ^w;6NwlnN&cxR*Hub2zhhJUKCjU`e3J1*EkFOu z6w%C|sRT2+zDLf0B#Npv!~^)_5xbO`;)GsxyeuC{iMPrnd%?S{cSB^83xWl&J*^pO z2XlZZz#2oL>SRaICDEcrQYiSe75EH90?OEo4C|HYKTgg{$B~;T1%C!R%tLhS_A}6Z zR)Q}EbE+LlCTl8`pPE;J8ocY;3Iga2$vj4^wJ*jm4b`>Hp_b4odtPHBwgFt{0=tY6 zc7qeLZAHobZ7r)(OG`fzev<$NhUpB~ayUtSEDm!uG9c$v<78!}5j{WodU@g5pD?Z< zOO$=Az&2ik$_LPB#}b}RiAau=Oq{tY55T41p2v_3ys|~R4Z6CJl6H6{>}9%~SIrk?8g=Ou`CPXcFuk{_{28G1U zvU1pI{Pf8`$;;I2?wTG(@mY6hf@n6Rj*d`*evR1x*&cgWEY zn1Lav5U|{qZYlbOVX>a{_R3W?)1PxP!i=Q{fB&4Dk_J{?{=TzhRLC+ciRSIk^# z8PM}ma=<{!%ZnT|Pe85z=VQn?JCu_|9fZ7P((&FhOW=jAJAtvz+^5 zYDPxH`^WyOL4hlhzUJ^K62%Ql#@B?~c`sllG*io>uU_9{0#%hVamibD*08Ad&bc;1 zFCZNN%oV7`4_y6z)F(=v&LOx^YwAU8leF{aqrrfXxYcV4S6g$NnVKKnW1n0dsc1Bp zHnfZf9;$SVuKC?c@CbxXia;t103sg{p4WGg`uGp=4SqCG?T9H-A{l#=(~1l6-p(2Y zeE|t2AO@JNptzaFCG6s?HQgVRLDkeeZt5d7v8!#Nn51Medtyz1kbRt{E#iwtpAUqx zvyG37l#g~X0#FeEwbd3OVPx8@dWBHRFPr+Buv_S(qodRI?%2(Y8&3 zsc;Bdl1b|tDU}tMsfk5|pA_KXmHC^ijXv*lvZU;|2PjqbL zXJ9}>ez<&c0&d3wD-a{< zk#3&}YPJYSl|Yq5SzTMROiNmLpP1}?f7es~pFkGSk4e|Wp-u-|YHC#hTQC*t_wrP( zm7Rv1{HE2_tFz%7d?z51c22?eu@J~#OZV-~d4oWjlS2kYuzC9$kPZ+(xdVsN zJC8dthO)Hk%5ujrl(>*r!-?_hHP`8vLHEGf%+?p8&k!42Zr6gxN%P&gkLbo4^=0Au}7H)yswfE zX0wJN@^g-VabpcuQBFaL^yu6OVz1A67ZeJdauDQw#mazTkUw&!45kmuSI!fNhF2AS z8_Cci=_g?bz5Hvuq$rVq$a11Y@l6%t4fAn7W~d=^LZ$h^>ina-<9Tur!=2? 
zqfV3ovcHtFjt;o^3RFs$CFC{;gsLO6$sRHcMx?tAb}`vI;Kqwb8j?9yrbU`-!8%;Ea;k#@!vZB6{k-=h2&KLqN%8RkvB?^dKMWe5 z!1Tg#^lrza5Llf#i%Y?M9rc|W4#)ptc1GJc1YYr}d*UJ&^UF$~2}QPyMnG7N;C;oE zcmvQRi3SoC%u2{B5cYsZp@3FqSea#z1s*_>x_Dd3r1;Tmx97sJqL)3`PzQm(30S0z z{nXmo8nsDCpu>bF9h^^`p)p{CQ(MW2T+if#d{%OT700C_?@HJ10+_US4?-`}BXBVg zkZ>TddP=M#zj$+Uc1BH29d;oG%0}>cCB6rpb_B>Lefvfp(Rjkm*M4OC8hC0w{Hv{U zS4K$At6kjcOBSaPY3+C$A{Easn$pt(9(Y@7Lw`nrqIs&6EVlRqsFY!4dFVJr$f}zf z9w3q`gLQv}hgS?PS-5-<_D|9SH<-EHk!ZKulY(53j%mmY%7jAZ#_+sOIi^lz7^7z!SE*-CkFGaLD>y;rHupqJ+Fyit-q1Y8vgq zR9S+ek@J0udQJHr#~r@gydcv}d_9;#N5Ez}t%`}L+iAYZ zMKdpmBO=wJW;^LWh~A{Yj!so_(ZD9=IjAwXt!~f`!QI2r|LRG=eXaBV}W@ z0Go(3S1MIHo;+7vX) zm8<(%f#t zOGL_!!&HD_t;Aqh)&(wy?E63Medj-xfBg1^q7r2!*_p}C-jtD@%m_vH%HFH&nVn5m z_9nZs$bhN3w zk#F`3lS$6bvA$*g&t^kHh8c@L!1;B$XXb}7_~8?KYN79$F}Y=_vg*s1_)>hnTsT!O`eQ}vxeg<3QX5*%&uA(*7&)GHh!?t^EOGm+rCSJv+(+EFZpT~|Z*345Oa zr8WdLDEM`FHTCZqqm!<>G+uX?w7x&C3=VAAy0|CvA8#Y@GXK{cEH)97l5XfuXn*s0 znTL8kO?s801V9)`m|9NVFM=F|=hV`GR!Fawva(Oz3VXLOS+q-d9Q`w|og~^N?3|yu zmSCBW{{LB9Is|O{ts% zLjBPpPy+giW0H7SxGDx82vx5C0ACid^yjk->u1NAmg^4|7f188o;B*0NKZ(8W4tQ6 zP-`OHR*YBOnibv)sWF`8J${OOu9Qi7oI?Ji+Yb=IEY@L#TU;ZR97=m418_+3!$|&n zEF^8VNLE3WWv_m~<7vh~{9_)TQ4gG(z!57L0_hu>|HXEZnA<&=NzIuI9mPEIC6oCj znD(0qStpb!l{pq^tr8m>3lvD=8um)l9;3+ztrs`(#?&41F8tXZt zf_?~d$Dtnfe*j(l$0zT2lK8{z_{f-hNW!FS6--U@o@U%gb2Ru9-Tp$`HVQ|#yX!da zhk4O<9%F*+y}XuKy_c#pKV=5=KDd|3$WM|nwmj9%&kl-zA{=e)5t_oFx4er7&&MYv zbpxRgCs+Dc{7g(e9*y-vuC}%>zhR=0P$kn@$btzIw_GT2|6zZiAqWm4s!~2n^o2}t zEB9pI^*pm_w}cD=`Piz5A22(X-(){XDCwiBB-uT_?j0EO`SYq;-`7DCEiJ9qbp+)5 zsC;Z^-uCYP>C4|jmY(a4Ik_2`lsiD}N9^8LSzK~sx}>++DTlq{(gn}O?DazJ7%Jb@d)`aXoBgPUgKlA(B!sftMoK;+9N0X zQP$@MTi{k`cvUTeopiCZ4mWDBu^V%m?Y#i{7L--hJd_HYXUTSG?Im>_vTpY+XR(ME z!nDs~g8vXinTIPed>))m=?iS;PBqotM#hlt*tG$K;J@(O zE|%ovBd^KR^bYtotRp1 z1PemK8|a#(a&UW{Kq>OpEs2W?639kg8{)iIE#WlyKm8*Gx)8K z2K6||QJ}@-k-q*rq7aKW-%lNa>%-Q4&Dfsd9XvED5wkcKfn@GCb<>Q~)kz0u)) zsLIjLF+ECcLvbP`{IAkEo&*ILT=1l<2U-a-S5CcYnYvar*}Pl<(@G7OqD&;zRWp-m zD#^br*(Rtnk&@Y8Kl!t?>{s32^ad-^t(K~sGh!i32K42!2J9_!tisHco8{^gDVphn421Y}y(%To8`sSquPsC@h#J40! 
zx5E|IDD~5mhG&0U0?Mna?>3m@PXlBjI$Ew{x-9QsLG$D;)DmkhIBI}hLA&N^wW;(Q z>E-q3c>esq&z`@+5CDUtIBRFI@sjtT>W47qM`p&SiuCp)!^5M0MurQ*hexWoBdfY^ zBfRyTQ_~=2oRA=czvgue%ltApBxLv%P8_|+e`TsyNc$G$+3zaa!z%Ia#mh#N!;LgF zPj)BYu!S#onE@69#o>J}Kr?kXo zR*QBQ6=L34kMeU#1>{FA@-_SVS{IsWHY1@mR8~jk=C*4`dC|GVS|=j&*zGDWh|`=B zcXRV&r~RkPmu31!JCXdKRxSx53+3@EG&E_09N)iCzgx*;Nc*v>5(+uG%teQXb=_5d ze%{UjpIPVu$-VCRm7iA?1=Gdm)p!6bEK1GPtxfybl-73tu^-V*oGnJiZAH2L{0b^$ z(J?W;{q%@JEus*=tXbW?Q;`3n(uZ&e+2!`ynDYs)?$pnFRb`M8&{=up6fQ5goo`D& zM#i_+-TOK-)4ys{<=*vC?k`2XYRw#?^iiRYtU}cvy^CkFOkRjL6nIPM#ic5`IC-;Sny0V2Y}q%_yaRPR1HHDzrbbM8dV?ru)?IG&-=7FQi{<#{j} zz_`A4d^uG=_4#Yx+2vvs{#y;gDe7!O#v7`y#l`NT)kdRXem=G9OHL5~TJ_bWF+t1X zpP_=;%f7O;TiczuaQ@?ge zq67rxZ%Ge~a5;B8{fPOJ`Ry9|5)%4r|D|ro%E&N2W-?{3Ax+__?^xUWCYh*OKf_Zq z{B@G|RhXpO22pO#FCN>ae+zqAYcH5uB3=el8h=oabg`S7&D_(~!TX z3lEaMc$BAcg?s zecBg0@7W!nin>OZb`LqZeTQF^0)Gn}67n0|_PSnE9Gf+ro*L-#v*^x%o5F(m@ryu` z&gen?lFY{1n6*=7C(%S67Z=a+^70-&;?vH_$w^=DI};NV3k!KttUA30I_aBe0;%`n zik0hh?ztIpTfE;A?;2oo`ds(#84G=W-Y!lcUN=?muvb{bsX&pOTy%N)9COLU3u6ws~&Ve?#_KYz-Dc3GM=|imOeGBf!*?8NFq9Zh3Owr*x^{Szp_x# z!3L$sX1!I=OeqCuP*Tf`jgD4VS51&Pjx6mt3n{8qX%_j9jzVMu(l=J05bR#QA_VjM zEB%eS)`V$^2=X)lAmk^mTL>3_z!+q_Ju_bNTrol0k3II}@}UJgQY~A`ZR2cr>+5#0v#n(nUT@= zFO?)jx$&_RlTkf49i5+4`(|U@u50~_#g>|}N_z9f-1U>B!uIu9+?ZG%vBN51FQ1swg=)@i95`60n4v-D-#6a`NwPu2z4E5e_L)c8^!Sjxph2Aa zz2;2kFR#+mBGYJ&oM(p&xmIhi+z&{bU**q{eQaxM5?QueaX;_PxWgwbuwL(?e9dM{ z@2YH(Q>ou}D}bTetCPs4V^eu=f8W$nf*y!*3;o$CD^b9OOYJi zzD3c&pkuk#?0X@z-a&;UseWKAMSt3MGpM(rAYo)!KXwl7#`4-A<_7&a1H-nj*=ZD_UW*06LCZn89`YXBgu=eL$n);TF z`=26{o9sH`n++4(?xJ}k8viVnXpKx{s^FLmyY1EOJ((n%pM0S)+%)o_9{n8CO?uyv zJIBk1!^*C$@`PW1(l=1IWc#ppcb9!jT*%I7Fpi~Z_W&vugd=%2V4Gc9Zz}x(~f3P>UTI{-$L)JPz0xu8?7H z6@T>TkqI&qMTX^M7m-<7`YBF25a!4#7x(%J-9}H0mD8Zls_5Y4d3j4_yUebXW_h_g z#@`hFMW)#s9%2z8_dU)h%2KiLV4^;AvLK5Dlnj|ui;D_A!Z-p&Ml-ii*afoL6i0%T zvV#i!dqhq~TiQXMRB>!aGasGLoQaR9L_AJ5R-L)IL&LG)n~tgJ=p>a>PHz9XH$Ij7 zO|{)u>JhA@V7)^=o7p6NNx`FjCT*HMtFES1p|ulXMd_Loso8E9$Jawc?u)i$A`PpP zRWDZVjY)NolD)GRf?PF&r-141_9I$OWE&jsl*ljM5QtZ+d_ zPMd?NN3+?%qqUM|f#y5WUh8oKSg5-R+O*qnco>CP!Z^v{6K`hM)z#V7eDU(0{nqnJ zSy}wlz!mO1Ob@mlt=ZTrbdN~asbn7Q62T8$xE`5{NVO~qr_3svgPP`7F9MkPCp3!Y z5BxdqEQH)|deEtqyfYHOEA{*o+~DL zNpn=Ui8ByBMSrHa}iBd_QTD77H0+ z`tKG>XB6Ibj#@!0J#8;ZH8P8|P%6``#x0pOVrp^s>x`A&{i@;Wu4(nsEvMV>DZVr` z=t+0D?Yvng7Uq)phz6Oi)WB=$USfE9E*McdaVB2!nRRup=i&T({JdL^>Z--L=MI@+ z;Z6&!F`0`lja>Nuy~Voi+8Ki_(tJd^yv-`}oCD`s!P~xDC~a+%Oa92XWzvDgo%u_W zfy|~8w!3zbb;9#hJk7!%@!YK0+TLf%$uEf8_dK< z6=aO3M_m}drIPQ&R3sd=@N!B%r9h(u}it5_5)M_EG_1{7~sSXe))j?++U z0DHkwEn1QW+fksi;GL8m!@f9@9(!0 z3)4G3oS|H!Op>447kt-W5>$$b($QtR8dPOH-f-fh{@k;JM3X*O57<-j`Xr!-#_XVlWXw8V_Jc;Ze-J^F6f>V_aBkjUL*2n8Q)=&U$()*@J}h zadj_C62Vud$v5o3)O;>-+Wk8Z`DticSoue-YoyK9-Hx z9tnzcdtv6GhqTwM^}4zsZ}hK^6eLa8DQc8;8S2(MvanFTBvFa&z7u1TU1k>1WsyJ| z*2bXcb8%|N9@*{h$zP6seve80xuOfw%FX^Lx1rz2ZoDn~w^!Je#4>@*~87av-=IIx-z2h(^2yKpxW^t=AxDlL03{p#2!IH5HmxWj68cjeqFv2UOH~$$OY6@!;_tlqEm-d!HtF`=zrOgn7c>R9 zPt@ul=miM}b&ijZ12Gv=ZBUE={AcGnDQTjs{@#z*XS%t)R+-KQQgBgJohzoeYO^30 z@)`R`|H7@KXkhc(g?XN6l})EkRXs)E^$yn&{{cToVokw41qVpo|I!8=fNqJ?yLa#0 z-OprDAfJBpw<{W;a%Hn#==Nl?ii%bnCf1u}(lwM?S;hghhVR}D{N&8#h>#25Iy4X1 z&<&xLv)jY^I&gdoRa8f~dWVlz68DJ2+5@h`aVOD|ljrKy-{6URqEUnqOnv>g5P_}v zY6Vw1fUC>PLB)E_TlNA5M?A9L;ZwJD=Eeu+!?lD_0vh$bR@!Cy*DVDzal#xNTpdI& zUFSaMHI|mL_a#ZDp`WWy;EVe-wOKKRdicq-D5~`nv<&1H7c(y&+<0^Gh=oPi;{>H7 zdXAD{gE>2N7Ahm23Ph0i%gV};=RfJOYOcZDBrUR}k7iUh+WKs#o~!CCHuvgbeHh3n z?9M$YV;v}QOya5W)k$x*UwgUxWN@B+!cE6Dl{ov9FjN2PA*m88*!Nk55@>+bFb$-3 zd$-J+12(LJf>ZhxB-4kTBDHeUNXLN8;^J_~JP&Es;P{B1oZCy=1JwxMZJ8lDt5@h+2$|46GD2_4DBA 
zzlZlS{D&^%Ip5qLoG{sXLJ}5Z>1(JUX}a&&h*9}(a(W~c^egUe`&yC%c=yrKvk=tg zCmQAfYZEK28n$KO0aB6o>t6bmmMU|stbf&RQu(-{fdpb&YZyo#M05aS?FA8=F0;P9 zxm(h3Hcwt$`B4$X++aMd8-H=|V|fU_+eRgH#~k(=Wfpeg;NaBO)>dkNORSr8cy16a zPhiOnQh&6v)=%xRQ9af66Pt&DC8)_df#62l!aFa_)8sA*3>K|cFrU$s$Rcj$6%=fM z#-!3hf7<2Q3JPV9j(QrMn5YFL9c^uMcH%<2RuB2Xl83zXkU0n*^ZRuaD zrK_LX=^xOvjZBVzey-dFVPGOsXk}rqL~4sGp|^r@phl*NgGt}RyKdHQ%}YOKOGRsw zUN;tjFAowp#%UghwBa+2ZZ)7DfWVOmnaREy&U5L&AyNJ&QEVk_F(u#YC)wMM6WNal zS3}G@Yed42(QHw52cjGjEm}HH;O&EG`T1QyBvWRZl#;R}FAuP*%Ja7MFb@RHwodk- z4eV=PtcQ(CvE3&+O+2@Ke*FZ?0~U=_xcNb+;5H^cqtx(`gL1wH;bk7@Sy7_ZEl-OseMG|xrL7g zSq7ZkrYFiGs_ks`|KdEyy-Lp7?)~jzxVZ%L8>xU$^v1jp409=8`GO^2t?l+R_Psm` zPExgc(#*v+y2pq`S!=PFUK)vs(-B!R@xsrR{Fgt`sKm*HT~RXVt{{4R?Li^}=URDz zoeGA|s?|Nb=)(vxHx1+6fW3x7AKeN)t9WKIeZ#FNeq8y{hSWgKI<_A6bnp zw1x0DAr>^CuD06PIp=kGp2ZtgkWXSYPEJOayqjwO@dwpYN6jQ*mpxjT2=*}kCC%Kj zKa7%O{*#khS|a)*qy@aF2?LMMaAl-2v_t4HXtn$YJll^XN16mmgF->PW1O6ErcxVX9T(tU1lRdZPnWS4N98c4_YK0Vss%+Af7y2m)W zR*m#EyN0cx;}}Ux-*31Eca&Hk%W3?yhwIIMCZQPZLs7$gR8~z=UrOWDeuk z*Ter);OR)3AWEl|&#TVsT*x7;OLzLtB}8zPp?wMiw`(cu^nuuGY^UWI(X;tG%0Y`) z%d2@hF*QZ{z1(}p~;2W^j^m7Hmo z*S&idU$*8!m*W*Ar z+U&-#L}!Fe5#S#_sGjul&dJwt;{~=LA3`696dQ(u{CsM;{-1u>_iqe{wVKL1K9}%9>D*e*+oK5Q88R6F8FZ#3;&BFacT32-vdL1OImY5^S12^t(x_EM1o4YnBLA# z;D>a6SnKS3Ze?Zl#(3njmI_y~i?5X#K(bx z00QS1eV9yegQJsuKDs*jC+70T^71GYnF_W4 zJRv=?bI9PVq{=&0lUm74=%T*IF&7}sh7 z65AaGwUUXFW{6}-o-sq?fqR66fg6;Ngzd@+P6_8eT{?^KOmU!FNiXSs#hiC0W{Qkd zttn_5ryuo#jM7g54rX=pBr|M|oDc?$yFp7aECJI8A%{^n&-(H`pto1`Bjg9*^odClmqWW^l~@CE71Dl z;;8xP=xEsM&aX01;%D)qX)nvW?_Pq?~N@9fXjEwqZymmC$dX0#7OTnhb7yvljdzF}y za*Ra)q@>5hGdm2@7NJNaAh80P*$-~}%Q#W-^F!Z>j~pJ3 z0WCj#u12;`7IJwqx^@gO08T~T?EH1`dI>YhL7jV93=q~-0*Q%<`iZF!093x~iu!I$ zC>s3jwTcQsB?E}Cl#Tnq$vU>ldfIURba%0%`D{}1$6Lakx2LlcdU}Az9aChA zahxXT6z+dLz2Nj{dm}Ct@Fk$oUO5*Y8VX;AdBH$!xfsi}vD2d~xiPRD`_Mx`e5 z$49J%ngx&3)*yn8aPoS)mGFPNa5wv7lP*glDd9AFBS44V?g-GrLMWk_mYLF7Hu7R3b z?7!#GEdg}jBb|S*t^%^*xB7UUL&3jf0_?UDiEuVX#(8dZswWF9otNjTXs*Y5OIk1b zIy-Nij6f9FYMazcr0%`7u|Xgw@qTPpuFWmwpE5Jyl&I&~zZ5~I*3+baM*|tpw2s1@ zMG3bZCX+J8Wp*h#eK6QLcX6*2wkC{6;gkVQlJR}dl@qdC1~NbIy^KVE$O7Hyj8CBA z=D-&^Nr}-5_Vw+bgCZ@UrZmr1dT3Ysdh*04RoIQk1BV*ntDlcxnSI-8yBp56`*2}W z_ss0g8!8lnrKh}_dpE3Eg{st_c3@|xRMT=HX+Dxhnok#DL|@!&OqmW_jP#pAbQhs3 zhyQa=AB-!_xo~i-8yN1h=_d0A&@Uj6O8pC*G&^_FCyhV<`@ZAasC5MT&Ec0slw}Rg z&3YSLk6BqqFNy`Mr7*7D4M5XsFbrB+K$_?Ce$T_)`b?aDZpv3mB7r^d6Z$%Qf{Sf+ zN$v3>n^e6LYW*bL-WbpJ4x)b!>PoErF@fWn0eR`^zl5>^P_R!JpuAo68%4BU~ z;{dZb^SVjd`FB_R`3_32-*H&80W?ixR*O6d@?=jCoN`J^M|XzEis5F$Bq9Sm9B41H ze?V@Im(jeBZ@M_yxWR*u8m(o=sOee zDQyNNRi)Ai@Z3)_GIM47ITcUp#L!WY4#UL6tZX*I#l^)>M{Bk7PPHKnH@&rghUN-E zTX!teLK0}fUmFo1NJm?Z$$;f}+Etk5b>Z>{BPlWQ3pbYRHJa7y4!&%E5i@ghY)wwE zGmT%q=mB1tB(1{Cfca76h37m$0XkIh%x-p8m01!58vWOtKO^VS`U%c)FX6T%q9vf39 z8%flrzui2;u=)0y6$Y#A8i4*Qi5yowbIh3ZKq9G&%tjz(|DV_|dNGi!o2|8zW`00U zP6zWcg+Ly-Ry{GfOg{h$9c=g8uBV9{VyE#!9w(}#dkF{}F>A>D03k5rSK1y#P5m-g zBx%5F?VmpSW&3#h=g*(%`X=yHXY7HCf^$IY5ilr{2|CK3DmQoltIzjdy5!zyaE8~} zt~8R>?7we?i>@}`B6W7DCw^tvmyzSvKnQwpMpY8?O}Nll#ION<1`qV9FblaRWJ7!U z0Hg8`n?KR3WCYHezL>Z;;28^geyO;DN(0+1M7sR|QR;rYcXHxtLnpfH{}l0DNG^_< z-{ZtyC~E~SKVEKb8woIxpAgJMJ!G@Yd?F$j6UJ$cu&v2%!0&0J0K$`E05z|O2nyju zz|SmR*+b?@UA1BphXubQc?Hy!YZ6>R`i2pL02#QS@RK)6W2F3urM(d(?mSxfBM0SLuSDV zxb}~jMBV@GK%fMAQ~Fh4GC`A#*;hOT7*pOaSRex_@fXJ4x(*D@|BD~0Su(hNUn6!i z$2M5l*wXBmKsO#VFe{AvhpevNB%K2O-t%s2x)!wP2rBo$#8u5L|NZNGz)bA|n@=gW z=jzGVum`|EntOJcaDiB8Ac~pj%B;pMeUIgQ@}v*YA^&#f-=|3mNm%T_G;s03(96%f zXQ5-tfGu8L+ulll8Z-#%zjy_f3W4~%f~Eyf2>fOeXbQmg3Yum?r^nWzaFEL{bKj6# zAl=Qp)Bq!_-&Je3QdxetC`}OVlbv}`AY_e~6LJL9ho7IHIB7qKvc-D==6S+`rz}TP 
zk2wwHMfi>L;NU>zyaQhB-K$j3t1dQT#AtQ-`LaxZcUI)^t zzklB}zn&k$YZG7>?(glL1)V{-E#7=jPU6L(wH{Lg(G=d`F`ywXMpB`Zj7^OKeYEr8 z_ab5E9kDa%59)&UtDnGzlJ9y6+5Wx306PX@bT8cxE`?WTqk1=mImF^)VggZl*|VTQ z2;5{yXqZH*ZhR%uhP$2Hm$O%&TkkP82Sp4=2gao+Ss=S^lQg+ z#>2-p?lH~PJ1Mylc@d2bRaA3coeIJI&54Td;Yir7A%4^$Ap^`jo}1$wWgj%DQ&aX~ z)esf82DEmrG6avs(uCdAnQPzAKmC&leUGXLH}syE;-H}YQtMA3ZhpU5hEr3d!3Ud; zT@35`G{{O@_DiN7vJIZ3ezYweZ9&dGKsHn1+r6=@sont>={JZ=A*gPlO^R-|w<3xR zmkc_>uZ!e2G`tqdYHs#AL9nO~o8^2=Ro(DCWE(@TywM?EkRsq9%Y1Z477Q^_2L@B= zb-b9W*1~f>Xmn*0;0K~$flEH8w_#ifQL#I?xMm0Uz+3Yn?-Dm6^8*IDT>Z?80fDHG z==Lw>tGtp{`cj5Hg=`jDTbtf1%*st3Y27<$O~Q^Ko_+H-Vgl@jA0L&sH0Uv99-rGE2g4m9<$TTaJPAzV46gRR+-*_eL%!Fum?+~Ag#|% zj817)F;V&R_9cE%1^**4a~fwj%k=Lvy50pBVhX^VPbH)W;p?!*^W4<`lNwlLYM^`y zL^_%tJFp$D=S->hyw}h*ucQQkBNd;1Z)<3c?g23qU<%%TnC$_hwci}Jgpcf zC=*>^tdx0qW;jiaG6$z{R=BKSLAosQpPu>UQaaPsHh;5+5FXsw*=c<`AB3iL?SuinM{Vu<=tOKF1Oiwjg{*uD1=pWik|JXfX9i> zu!|cysed|o5r~Z_OOesI+He2==l}H#ym@ AH~;_u diff --git a/g3doc/tutorial/images/mono_4_of_4.png b/g3doc/tutorial/images/mono_4_of_4.png deleted file mode 100644 index 675b636ba7c5216e1ef21670428b5f7375b054ab..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 36209 zcmdqJg;SMZ)IWOY1|^jcq(oAr`$$MhNGjbO($c7ebV*AI5>nC}DFNv^bc6Ik8V+4| zpYQwrX6~K2bN_&gGdkn*JUiCj>$BEp1>x$d@`}XYpKjsKmOHV^Y-0K=d6cm>A6>qXlP2O)!X%*<*pW zWc;c3|^AofU9M%tgvrjW&eNu zVPH9V-@EQwgN@qYZ~RBTw*2Yi;xgf~-?Z2bxpg^YDyFE&$^S-UfX6}8)y_40ORd?I zXz7^iZEbDxv5&Bf+Q-Mq@x~As5)u*)p9Tg8+I~L z;E`obd<{rLEJ=c$UCTG&Iwz1`h))t1sy zLNsdBAMnbgS(DoBqc5`E+V363GtDkdTYEg;loh&{(J1f!pF-`)XUnk3DC1XzM+U*EYK8o0(JTeR*_(`|1 z*zD`??~fnCnBWHAa?kftdG+e2L^Kj@^i0Q(@+u-fvPzA)LhSkT6<65xrJya*-r9wL z^bY0iAD*?*9unM(y*&9?Gv9r%T@wLJggTjV90oQeqnm;Pi;HaB+vhz)^KLWw6p!$r z^S9Tu1GUhi;=OacQD>}&doX6sgy8O0+EG~xrmM5>-@mKN#Cm&s_xJbjIDJ4%rw#TK z#+x-*IT zp=yhntN&mJ^9d0C~0FT&zf&KoYv=nzDFt@YbVrCc`S&sh%6lWdu3 zIt82KD@sK}gBIqV+@5}y>VkXB3@?*%J9(BFRQe=@4pIo2&0JHu?i4z(Y^pNdNdv+T z?bljbT5>A3r^X&h`YCXyNy!U~Xu&7=Z)U&xGwNPdqlV7BT)9L|Lqp*6u1dn@szi&OyLX}4H-%>W2|Y$nh}#l1E$ypy zh|0U>kA9*nflkYJ(!VQ8KN7$VN)ots+d zq+C&6oG+jKsj7OucZ|37?K+YLFSUe<@rLUi#J7``k!}^-uC*KKhd{4>Qxn0jU4)78 z*bI7b2O}>mkg~$mRVqsx1{UO{#$z6G2`MjJ(5wgCi+KN=tpb@TQXyuD@B71+awuW;{*!2W%ise~ijo{IoCAM0)ZlUV$_gXR84eE4CrW2bY{c*V zss_9_*OI<*t8N#j4rg@`YnAmddzrFf%T^{grt)i&SeBz7zF^ z@7Z36Xv2cjCN}XHH#E$LyHlTycc4goCag6Rv+;NEW|%%5>(RFq3oM(Jy@kg4g<$5a zP-+ZfLU`_w5_^)XrPTH0YI=yD6W)mdH;$Cw`Lmxc?e0?HvhA6$rQcHj0U5e`6q=|O z@*zFFwUDQmSD2`9AnrQ>ESt1fT?O2&?8MyY3HuxC>xbFx3qB7vej4CMT_4H4=@a|O zm?nms;-yPl7t0LQZ2D(+Tfx~7ea(WniQDO?%eKEz$a*jwi!uldPqSjeNkrL}D8(2{z z68Grh@wn4{Y@0pTbF!|B!N@Acy$D7>j{7+P>lvak_=P?_zc2h+MhOd1TFO}$^r2+s z??0iQ>;N*!(n-i*c76x~fq({~_JE0q8wtxO<-T%UdIi(JjvucnC_75G9(-B@v%b{W6C6{gPh=#7V_;x>@G9u~`U&=qo^^%^wfLa- zXz3YHp*nZcFS3lwd&&fE&WvFh*=^3D_gv8VQ&MNxWAy8tLM~+6{1+P5V*MvhMgA1qK*-2?7`LX%Rm=byp99WE~K^dR~v^g-5ET(KXF4Fm*+^`4=AeS`g zb;0Or=p$eW$*6LAw<_K2v z64A;T&>UtwyP`zA{MN#+5{6*TwH7L7h<7!O5@CLZN-=RfW#)wY+AI zks$_qDDsig^}zQ&-qMTMUap~)`Z#={QTkg4MWJKm-`6eVKU_l{HW#T{$z)LH0u7=R zh9-XP9^?_^<0EtTkRY`IkXBe&n8$v--c|RhoO`IRzkikWDbv(8mH*}C<+L*wCnu)i z?=%jP&wS8hCcSfteFe>T9VPiH9|MXQw$f_W{``?G`i`&33_Xq%YS3Z4`A|}e%enUV z>>F@Za_DOsimNbEjy3|mL%I2ER^v{Y%amHS&)&uaDt*X))k9f6VD9eDKOXxsl|67Y z;kCacCOByUx}2w0%Ou zN1UrMKTKbWC&$M+%ZLEp&hO#(^H1Z=t5SyVzP@zjySg9a$IV#rG0+oez`YzMd^oxD zYkNvOGG;Ac|KH`6nPMAsFxYnSY$RYLY_BkGM97TKC(IiBTNw`RTKMrGFlfWcB5I9W z$SE1G{Psisuijq8+l#mcN^goVTxIX8T2kGn7t~VE++C>+1uF*3?p@;;(T6Swb#lYo`jcIfFMO4ipk}%D)FF_8 zHxn5~k=@_I2<+va5;4W?Dt<2txEv~2D-d2`Y~J09&a||&vM4(ZEEQ+FHws6=&wt+T 
z|K~p@6u=L0GqNH2@BROmzk+bdDJbY9{oa)6?9bM6JbCgYNlm-tdtcu{3~G)bHbIAJ zI2?Ykk>PxM5GLh!cXK9nwNpJaGgE=?cd?#A@<{yrpo2tSUVh1EyDTOq=H}|Gu>Hcz zOS8X!e1|hBDMW}2H7%(OV?0|@UwL^siR5_@^J<0JNhg(Y+%v~oy?V#F^tN_#+T+Oezdq~+9=d*=!~J8|(9kSQo1=G)hr|GQAwf`T zYHEe98}>FzO3KCet3)H%`QJ6{T#pokjEoAj*qbI$!`;IPmtAoeMXkL;R@MyZ)q1sH z*ytJl?t`ny31TFXP(n&=eSO&x2Q^~qR|#L8HN=kB((;^hYi4$KHlm;Zfgire-eRX1 z>YhPf&sJIX_w@AWJ~i?5tjo=PJTf&mwV9af>jTF^J!deptGzrPmbzn4^4kKzz( zHP52VJbvDw##^Hk8H0KU!m+8)yIs-xXn4Rq{J8f?HnaOb#7A^H(`Cg_*1(irI1}n6 zh8oAYI{$+<9OLglLqbBTt%q65v1;^&r}H{7=I^8?ER8c5T_uHF?>x}HkSJd^*qSgNA<8TI)`sw zKtO<{=*7{Ic=Zc6w991~HShnfTu_)Sqc=GDRwWr`nG(`EhaA0KO+pFY4`w!~?@gMH zhs2D(mt9Qx*`hBXq0PWqZ zmTEh<-gvG(KDk=D3s6-hzT4vB;(ERAY#O^NB!bM6@Fg8rQu?mOi(F%7N&1FjKaf%h zyIRqs;LBa-Krz_{t(Ub{x$50`{DOkX4Rx-K{`exHp>TiHi<7AX0|Px%uP&~ShlL!b z@BLE;OJbydLS8bF-atu1chU>@6ca_^-fz7X&qgMa0>B_mi9HVfzKD+{>!ULEi*IPe ztF9`VnR$Zd;}WmI29N!3+^Av6E6yX6+0jwb4Rz9R=JtOCxiN{QFA!DF?QFLcL$k6@ z^X*V?zPZt2*VZaV9szRCReo*ikpg{P;^Z^acxV{C(>)nG4xL&1V3I?09a zKN%AmYJ7A;-PoT#nW7%-p<0(C^7I!fivz!0R$pqwkagkE`^6yynZwi8thi-{hrT^P z5p0u_2)je_Rz*a5Mg|Khp6}OBI3z9L_!PxeA6#5qggbRj>SRmD%+-%ngSR0%#W*C7 z{v@hwZfr>2AjiNP!hGdG_!W;JsUo`z;+#Y&P^YvW-1J$Kio5=Cv~x7}eMM@4A}L_$oA!>UAyd3-G& zD5zd^3(EKW$?mtUzjSHm*fTIdUTsQ%Ulc}?ZGpJ(R$`U$LaCiXU`6ZgB{d%0l=?;9 z2=g~b4^gmf-!ry-tts#r5?2TAem&*r@n&}5qaRon&u4f74kIxIoQ<>9Lie^_#DYN- zAarsIg+*wK$=p^1PBI^5i^|K5K#*j>ZZR(}FP2`4n}b7H_E?EI>w|RwZ;fi0Dh}kQ z(t`D)Gd6aP9Ve_siT#So(}#scmJ#2`+zxaie0JvYI|hSS9QVfNrslJwmaDx+LqkJpy505=f)7;OiM_Ws zOLsTCs{oe`rDeS&_BIl!sR>_%#Xd18-WMBHf zV*5(o42)d&q*_;coqzFG%QDE3Fj1TJ-3aLO;}JoWR(KE8Pe;<3NQTK$!=>id|h@E8>6K+uK8lXc8XfxVaU> zVE+d+u7Khg22H;2PQ|F!Lvy2Ac%GPmfx$m>Cs^_86V0&T`JU&JzM}YU<0hlqn^ufP zJmJ4G5u=x(s;xMI?QEaCpqY0Mj+^;UeR_M5*gM{HXMP*0(Pd3sq^x8W?|aKl3SD9^ zaPON2CHu(V7`DhK`^JXe#Xr^qsv7kqD}B&vPaSnzuwD&21hML`h;NAPH&wT{zK@l= zfetAxhp0TCk^QWXH#3JLpVmP6Ag4Q(i{KUI7rxc19q7q(GL0K_ z{u$wpsI25?E^b{JW*2+BG@GdlYp=9>9i7Lm!qO}U;sq8HEBOLbe6!1~jMjM*UHVgZ z%LcY8AI|bXs|&}wn<5e(mRJe>>%d05a4v8*pUn^U>SbvutQ^2Znr-m!ERG>!g`sZ7 zF&BdI88gBmEd;#bg%No$Selrss_M|rj>ACM<(66?`+Faw$^e-Gh)T_WaGEDb0N@M@ zF=RM0o?|p7!o_UJ_ei4gFBEkV0lyyjl&3*bPt|aiCx4hd;K>)RBom#N4Tgy|c zyd@Q}&c$2<8EDWc&3rT2fqN^&a7U#p;AF)i^wD8}2z4oQ!Nip*(-EZXr0$*0Z|f*k zq~T{pYisL)Lu4dR3jZ8b|At6Ve{&U`qIomXf+qUTvRr-zZO7L;Z3#s8b@>C;AGfgcm zay5lr5KP5cij}-%8$!OiUN^q|&QMPhaMrkFcwsE=g5j=1DIjsf4CUc^9safx*o}ny zIWvKyR~YDrbc-g|CP79Abd&4HG%Q3EIh%eM$b|hAzeF^Qt`BIlbPhS`-;e2TCf*as zeZ)pww$NkmHMqq((_tA`?)P@n=IrBqkqwjf&89j3%nnVt&~b@+`STwU z^u3CT%Hi60_ARpTI?sRKcS$(}c@4~>9Fk{omHAbtE$ZS#MTg9ZFfo~5o$i9rXmS59B4QOKViKzR=HwVQ8y%j!f#M6v^KFCx^u z_nXhIN8OyAE7Z3xfPlpP&foQ6+nCw(JdyFj(qH(pwh+J|CQd#zfFm{_pXDs>I60=HtfDg5;(s++rXMy$2#LFfIGznfwJk^%;nC&H*z$km zzW-atpdk+r{-vH-k7g&RdQn8E9TO1)S7xX7BXb)-OQ(bE85V07evBsTHz$G{F0vCh zdvS{g2~$u|0J8sw_6@!G``>CEdS!yww^ElQvCJ*XdIZ9!ubM74GU9I|Q}Q7yQS5q_BV2hHh*~7jDQ=t)o)rss z{kPolEv!AK}+gVg@VVE)G$^E`)ZmG9#RvoMKu}@f$OKZlyg- zd-eJ{GNlQkf^e4%p_RIt(ZtqV#nW|LR&lEtNxO?{9ao=I_qs&d63PL|aY6gN-*O2# z1Z|88>hF}8W6O6pdod=<(&xZ1VLgN&stJPB-amoIT}gU%=?}yEBCGND34K-vdJ(Cs zoIWWWg)7AHFJAZ3n=K?XwOyxtL+2sht!H^yjpW|hUIy9zYjX5CbMKYe(Q)OzYjlbr z8y)8a1B5r!6wb!7Z)=K*dUzxVo-gq&ArLn1cPlIcP~Y45t;1gU-EQDc_!Cj7r~eTA zkIIYf9318wT$n@!Ay86u{&kh$0A6kzAXu8KWIyY)RJ=FV%tsI|)6vbUeY7VSlXkgH zWWOYc1syxT?2o0SrX=H%=gbf`lb;f@EPlJcSNVI~l*d>1d$=If+S_;p9Lq`0?(Jj+6y+-q;9@s{~cq5CQknH8h#>^?JLis5Xu_6 z>HdTe`_s~3I^bpEM8^ji$y!4W$P(tElRa9t*oU8YO(Q5j?x}Ay{yj~+v;WjA+SybS zDUy>>Ppu5^@Z{zBLFj3iIdI>gZKKqrLXP5RX7fQ+5Z@OpG4$PL3lGGY8{ZZDm5YcM zzsUac8>8~n8Papx7kytsIDKL<7;*r4ey_39xZl#aq71k+Ps_RVH1hgy+b5ebH+D>$ 
zl^JWkz>EGu8002nNtt~~oJlKGQr%a&V(QBO2jBv4A_;S?5g4DBXq=nwt0muz#0HSt zje`_Rb$$IAKoW96iA3Qc<6)L)boZApUpi*)%?JK4x;4j6$(6eEj#=4Ok+wwi=?#wL z$&ZwvK}=KS2!R8MK~Orz0(GAL+2&C1>gIHf45U2bLuTI?TY?pm>X+$6 zs?7uUot4OU|HW5)oiHMX_o264LM@Nq!$N{2FV+$PU+Hp-?j?dJX+P2888H~p_G`Aa zGF?ra(7sK5eI^n8EYWqI|KUKvV1`hhE(YXxy5_*ZcdKM?=?s+IHp$=~_DrfaFg|Jd&u$_@m~uhD#_JZ5bKbNNtY?2sun;Q=oBUJ`rtN#ViNX zQ=l8wba!~8fd)PgNGh+V9E%_aXNnI~r?YG_m5(3UY!Qtj_8wP0=Dz&?E1mZfkfLg}E zn*#4{^S|1GSr(SxpIXTl3%&D5>Z(`me3WD8iBv;nIG7V(Yl+Dv{k_3fQ0v8quZ_vA zwbdUD@?B482OQdO0U19)uW1@{uzLYQ@2IqhLI-5`o#?Ui%Xt6c=DZauS3huqyrtz; z+oFy(l`ASZ5CsbSh<}#<9)_~@4RP|*u|pFjd{ZTydClu#Z85B-#Lp4F&=#GIc)8cq zR$-<~s|rtlM2kqyEXu#Od_444<@L>g`Z^n#tltv|HL0J!D%0R zr@F_&xYb>Z?kA1N`^tR^39A*JFnUO*)TyW8UIB{QzXN+2zY0(`Lw5I9JcgQAZn|6Q3)5voT2f^*E>S%sc zp5Tq2k?ZC?;+Z@n+(xT1bS<(q2q#^F3_9&tYH<2u!h{lt`K-U0bOlkRab5pvZCu>^ zmsLT&>t>LcR74L40yZZ|7+mv z)%3ya5JAI^)AeYi_`Zj&E(3ZF1Qsm}Kb%#3vFG=e5CC6yn=%~DAl zY&`>_pvHv*j}|+Ht8NA{FD25>vv%IdQwPX-YT%gWo?p|#58LX=?0Oobcd1!t;se%n z6J4Q4n%&0)1f-SRq7aCz>|lQLcaxow${9$r$ZUg zStUKA+4tUtUY`O0fd<747I0;3QC?LGnH!{BsXO2B1aPgRBS#bR52N`;9x>k)gkw6n zQuqDni;5ydf=%(VHM7trB%vee_Sc-2EA!epxOb@dp)^fyKEd75>B^s2rn2B_U!Dtz zNuHKn#Bc%k+&CHJiAyISDmMS@V~wi1D06x{4n8FYo};`8>(&7szvic*tKiUYSZW=rH*g1WCx32ZtSyG5R z;GCo?b@jY5Qm1<17q1XbGdhG}Ikb8G-u2cr|6=8uj?qVhqs+<}?cdDiPSE-Zu?Ws% zK~#Sg=r)0@k?pUD*Ca+RLzXX=5C14;JymwAs;gE;$7gt!(Tg9ofh?0*4BCE5D0Vf ze4}SCKVQ~tAE$RSt;6Fa&&BIO{_4+)LVUK^Qrd3zLDg=?HmOoG3O*@!fkvMpTxY5z zLaY8h8D3hxefrgpS2N#HRgd|1ILd?h&|3q_j>QPv*6&So=#&)!fBtB;aPGU&Uv12m;-d3+zB(1Ze}EKg!D#*$ zk9Y@+@{9zL{s?>P_Mnm!m0cjrH~CfbSbAmPJ07um{JJ=PkkP z(=f$moU=w5`S$Ve?tJD4c2@m8N@vf}f3p+7Wpn+)dTr;fet37f>+ci{TGM?XQy6pB z^4Y5(>yK31KuQ&F_HFAy8%gzBj#TSwPrk~fG%FSinbRw8v z0Qc2lsinE5M$4nAsz7zhhW`Pl6^7#|O}eTbe;6^H&qkW{!662O;dNEh8C~B9CB&$# ztSm7xkzb|SxX+>r6xYQ6Y;0(Fzdc!2T3QMOH{(~Qp9`Pd<|NiXA>daWzSOyDBdX^9BDu)F!-{LdU0zP3K}0EvhWP;ip;G?1I)Jn>vuc5K^HGqjQsX6Lg%_(yXv|)r&Ez=@A=@FO3h#Uzm5u zvv;=^bY6_YMpYaHgNi~>5Xn4sdUlrBn#jeklE`CiWu+AbgNYomT=auvL20I#m(E;9 zUPP9p?#oie!f|{VXurMqb z)I9b(dLt1dprp!6O2@^8cgFPno759gz`YW&pYDDBg}n-_$K7o`igK1L1(bbLqjG#$ zQ{bdhp@c}&hg*rb_L91}pI$(ZA|@QF0jIw&1$;pW!@Ho3u8brlmWv})@f7h+a&B&} z07VW>MmoAKebgEtU_^l-?ZbgtOo0*+@eaV&=FK!%`K|&Z%lLMI^@se7W#otwh@-BC z(Sh-TP3z|_SZHr3$1TnVem*wwv7(pfew%K?v^NCK4`OOzX({1@{Jb_>Sy>5+8P?a=zkg>rb!wlV zp9k4(%TRT%y6?(Co}!tD%yq2+>c5BKR4-%X&Skv|XKcPtnMe)v^az{7ON=VFYH~y^J-Pw8^DO56(Au1su0n{pjyfbM5ESecQJUm=q zUr%r@y+xXnk&)qLB-EWaZFl=Ez4ce$rwf(%0m9}xcEgeieftGx8BfXuvsmk)XoW`r zlWIEZoEAj?TOd?aG4HMLY0LNnYqMjH&=?CV#ArVHkP)L}<9e=>s&w{3bSUud`za%BO=o@fP9UL6|{hOm#X7mY7FIQblU~5Iq;mU{lf=>pe zLZ+uvqJ*w6Kb4c_h*`~UxrYI_ngsPTr;pg%V#UE|f8>g_B>m3gWalO)bv%`gjhDUUbuYgS!e%V_wi<6DCjB^Q*w$!sJo z<4_YoI;0YSWOa2l^VHDbVCAPlQ1iGm?jb{X`^VG%36u9=@CmM~81R>ZT=^h`0>i}y3rq>1SQm8crGjn1uIPdWH2MqfkS9HQIT5fHuRza+> zv&VJPo!r+usF9Icb#CRT5 zAt_x%WMpqLP=s1aP)c?0(%r3D9kzQ?DRV6y<9w@vch{IC=62A!uHiK`DX~ zX0agu-~Wa)Tk~vGEZRjNOj;f6?U{@LD!Pd^7;smGtnCOq$l-hboH~PgHTzz=uirVz z^V?ESfxy={h0B}vFUcH@>^p{oM{FbA+S1=lhBsL`)s3Cc0CzO#!BcX4QadV2N)U^C znU|K4!DReHi~Y3md0JF+XB^EpBTck)Ha0e!=hO z|1eKcW8pH?w=|RjZKN1 zf0f6sSwu#n1XW;HVMJz5N*V~ly2c_=Q z4uvJ#cHp{e=9&n{1tqv{xV*R!@#>hagUl!+P`0CxOCyYx_@s5n+o zds_VvSe5Ol=#S$vz@a)M5c!tL{0y`SEKR?$9jEZ@GQydEH)LdPJ2aq|{YR&)J}ttS z!k01z;81TD-?=Hu!oCOC0W&CHTwDaVvaU{$TOc_p=`qv|6#GZjcsuJGaFA~_W^hpd z*q@I-=5t^3vQ6;nGvQCpbo?BsBI(gs2J1}LXH(Yk%@P!PMDG6xk(W|RA@GDrsy8Ys z%KD5W2BBI^41^T8HXRp&6Te-$FEsk}vW_hRWb0%;+w4D{Uk`6;ggg8`4IoFHOXfFN+iLF$f^6IVd)kR2E9P%~TaoZagnX2Hne>`4| zi5auH`*m^A`Z~ybbavYHG@3k1J2ifeZ!r3E_=sLIhhxK1}ocYnMWP&^JEy`I{yyh4sMEfjZAAg=g1(qg^4?U4kYcWpuf 
zDXAU3q+id_4QERK%_QB32~_Ac^rY6o9{eyTtAve&#>}W<1sBSWP)}u#PFm_rMR3hJxgt30x>ifa`RckbH>bdw#rTL zTNp4?r9U8plhEOzp{jnOOVDZI0+Lr!veA%CTv9v%ffHt^l%K--y2OQ(r>CH!+Rg#r zSy6*caUFlS--Sqqs|wetEQ$0!f`5c~ZcI(8+5hTvbW~#^d{yA&*UP*%6W+w#%;7YI#9Er=DQG8D>LUcj204NE4f(9&f$0cZ$H#Ky)F zoao`gbH5~AfOKw* zIhA~gjzPsuR~_S@6pgmIq0Is+9tad%mas#uEiFOc)4mHXSisI-esuS59$p|>JiQw@ zyZ9=DeCm(>{;!p9>$2u$JyYEA3e;Szq;_O3a=;Jk+N3L}KFy=zI#P8Db{l<|U7z@@ z`ApmGdZu~HyK&`j0P-Es>j|=)w}4BnuC87q#HFN`#>NsUdXFA~=7nGdJF^wVpaA4& zcD93Y=hUy?xCK^VV?(xq|B4G`Tdt#Ngho|q@EXyL2*TUb39^o&m{PuFy;)Ur8_o34 zX?EyuedzqLkSW!V8ti106I!ickj#fk>+v!u#=wQQwg$;8vjmf&pntDAW<}v$))MM= zYVFBl>ew!QL40KU`1XbIX(fn0zi)Fn`rgpjJtMM3txp%vSxCC^!u5KYo#MZ1#!d(i z4}~;;8&U;jQu~B^Jk4hJMWDKM`u!ynim>bdrO%}M!tRG9H5a9;JuHzXsP%R6bu6Ix zt1XED9k{NLeET47HnzAh2Cc_9=@&oUBYn2+95s*KmooF+elo&iqyh6qiMHF2TE-`BgnlOTV6L~8*M7#IPC7h-F;iq&O;Kgme6_dF%R)F zY88%C@y30F)!i8!R}HGJ-`&b;<51~dj@;zrhGx&uc7; z<$<^9vcn2i;oJ?T?At$64Lv6~CjW4CYFvHe+Sn52R2AC@21`}Ag6z7&6p9QD3~J_f zVK7*Di!dglX8{JFhUbnqZ-eKD9045o3dNnt_|b9k@s)sT|KqErY&@yy=i}rZU!ZU525P`06yCYYwmlv;wk@qy_8M&jCD1 z-@Dx<4hZ$UJ!p7P{m!63ZR8SZR|M*QW^L7w2Yk9rPA($&PJi6J3Oa+0K)qYsBAOm! z{3}odF*u0VxWa&5qfSlxEBh{+$ROypv*J5WOR<_eRNSZ6r?ir_nff21n5=uQQ{Uy~ zoK>W0g6;4^nhtd{P!^PooSe!RoQw?&n0bGJ`y+rEBK-l9Ne)c8?rpW_MeIX^(Axug zi*VI2R7^cfIKhZ$!a)FBDR5fgwuo;FybSbWMKuLMXK`$zUmgpir*?6_bv+iO8J@+t z^`r-mB=ESyBSV2V`c|KIbce`xTnUi)0?NizzMFNvGVx@m=(M)~7wG zI5#OMkfZ!R!litO?oJpZG_GY(qlMfGIGL+Bw64_zk=y$)5HXzWsMQ8T6jFPDU<)G- z!knt?pQnC}mvt804*ng0@4XygMC_GsJugdFXEF*-?*hL3Pv-Y;?mPLy!sn(p8()!j zjF~Xlv`Qpx4vA6acviifuh&L=L>^)k>;o5$3&-=CorYFXDVp5fXo^V5GzPlL9(@0y zts-DMCHb<^a?_)_2Tjjq(BXaC{Rz#Xn$*aM2zWM_e3S=zc5*T}G-Ow+cXYP0va*a@ zG+_sFZL>8Vku#Qorv-|KCk1IC(crWP_gM<>N_2iNThFe)Y2_8$o$cOp{~75UdBG6B zfb=5=iR#eaji1WQq^KNj5Rw4+aBj>!r^xp)pWh0PcDnd;`DW894loN57!hU$j^&Fv zOO=Wx#YC^Vi^{1E?|gONsE#R8lKkOOi|7UWvVE9i$URl2Uq4-(x$f$%hS}HGcP}SV zxoXc|MYVsKc(}}UFPnCm%Jur>)5r5w6X5nmb)+7S2P$-Txd>-oe2ayG2mwuYtl6IU z;M+04AmNE%%ZyCsE6%~OSC7$=H#WqKDkI4t)-Uh0Xr&zKKP)i3;0mJ8c*1dOkua;fHQM1;e6s9G>~k)#GUyk^k$f*VJ*1z7@8keMu+(J+_U{$!(=wtj z@^VL1OpIQ_hy49M!_4%=1k2z3%=#1jTi@Bc0Up?svZGT! 
zm!3H}zpFpi{DSxqSM0NXd>H9+WL;H%vcjGK|19UQ_%Y!n7tlVY?MnW$WzWs1H>B~lpax2_YDrN2ZBCp zU~ss3coO%`9wElH8oW}9L$a=~tj`}_O8LKIc$LL80?VCPG!g3eq$n>acs9ccV8Cqg z#q_(O7PFp7NoO6p3R!tjsP|d>#md;R+4!+DXwi&U_uU-IR2M^n7$sd#HZHvQDHD^7 z_di(R#S~{iXuw5x(oQ=gGjHM>#nG&zZ`KR8%8{!zZ(FO~@BZ0`_5DlyKaQ*RNrb@l z5B?J_NdH z0ZqLM<&MtIB-hDC6~H2p|A0`V>0z!luUI1vU6c^8Ce8F8|NazHk-fqDAEeN2He=!S z-}G>+%;0X`=^ad_lusNq&~vdsr3!)pT1fLTb3(iMUzTbVgVGmzdBwaD252E`crZ1p z6~WwmeD*CASTcmlV3-(bQZC0_*+w-nFuz`+AOj}axK&e}dydFG*`CVV2j|1Q7x+HH zCp8EuPO-Wb$96631LT*L+uiN|99gDsk2n%?npc=B^DW1d`n~{=qSp!pixB2u9p7k_ zph0^PhXxANaq>JHgdZSWF{PY#KC@3`NC-=F`+3+n`De;}jRgoYvTVb+oac0qprk*@ zLLHR{&hP#Xn(tq-=51_Dz6<4Mdp1|fq8t1$jC=nlwPSLbemsieB-rS{k$!4Fj}sJ_ zU1E5dpoK9jX2TYq_JT5->+26~k4I7lE`(O~s`PIXzK{G@BIZL5R8!*wmpVs(RU#9_ zXhB$DYs)qu_ZU>_85r($A_zh2Hc6p|Q3TeAI6?kx-cRd#_741lkW zo065n#@^!H2kNui+og-HumCA7(kZ#06Q4V#8xRnXmp`7rIgzOlgwE%XRA zGC=mi>zyldVY{C}$N>-r-67ML#1OcWCze%Ubet+qBPST7-0(T4ciNk?ZA{33^$+`K#tG|{r+iQ83pUYQn!^bdV99#q&Et(AGn--IstG zk}*+Z+>Cb1TPH{}PJtP8$^kq3oU{~OO`SdS>sRI_3Oi~ZfV6Qb@(Zo|iMc5$#dU)G zs1}d>5PcU+gcF)k_Sy!(=LCC2rANfS7hecVO77J+3r{Xxg3E*pROZoXFMRDqorPtt zBlwJ=({=?H?)uosIM16_LZ!6zNIppQsiEQRLm#_7%_sk@eRjiKX?V50={=wa?~u>X zo9!MD-fGGw^6Yy4&(u{dsp#&$dPW@R?)dCaa|;STeQ`4h6$8U7%v_U>9W8i+eudQ1 zQU;ch<70A_y#FQ^P?39>)BPt(=Pi5EkNkX0G@l83=fdRVYjY%`$a<0(d>ur-8uJ`V zAj-`auQ2a)2JUleBQ{{?_j9H`Ry`l~4-R;U>d>m|x|{@W$0^}|dpmh`pYKD>yU@^3 zov)r9+;B%r0PV4u>sg@13?9Jiavi!OpSR?O=s5@diO`MA1&(Db<0`hP7rk=ix>`ZdinW19=J z&6@GRPf1SxSdRMSmm+gk__rHY*s2--%RHdh3^P@!Fae!c!^h~Y4uDTc@m^us^tqY8 zZT!E=`|E%xzb||g1tcs=5TucmuAwEQLqNJDq(MTuOAwHh?(Xg`mG1769)^aY>uf&X zbMNoo|IYu1zZ?hNd1t?SuXxt8p5@~QLOk<84~OR|8GhQ1xwWxTGGkJYH^@kdjSOA( zM>b29@4*}et)dGbil?HOI;ce73YUM8p>}-mI(pMu@vkru52_A~0BFe@6UWN%!y_Zr1W8N|q@XGPD;Mtb9|N0OAgtFTlyVU9K@E@*Uuq*UO2V)n=8wv8#90R;;Eb0e&GZ4bAM%en4PgwZE(|xWLODLTheL zej;{>>M0WE1D7}hY%|_Rxda3u4iM}uO?|TkzZE4WvU8T!$44soGCrqoeT-5e;Q*Ee zA^fjH(C1T=D6$zKA?19CnzktX^TEQ)>j#w_DQME%pyFf|9)r*vYt*u{v)9?I=xS-X z?ai{4sukNTGI~pY7uhEd; z^OQIo=$McOPS4K+x=bY<^ZBGDVBj~e zv#PZAbaqBdm44DUiGWv_PtaI})kSi})k8pmcH}mZ?lv15aCOwUWfIbuS7!o1bu(cQ z3`uJX`^e8$_5Gjh{6hij()3p1wzUaO6YG#({f`{jLcM^!kAs7YMfpH~!Qy9#l?=#o z;h}um=C6VPtwb$!_!e#M;ahW`WpxZ0-AHe*upttHaEzU2{8^f5UXc9D=B44`y0PhL z8ccL*Vsa1ek+YJ)@tN(yMUBs)Yzn7=#iphL1JqHL0+0<&;z%k-JCCq{r~%uKtT&*T ze?o>r(04Try~XUu-$f zRj6VCNj}$Ody$Yf*ApolM^nuPR0@RBz|B;?r4e?f<|&Y+nwXdXD4k|jC*Q%k$XKmh{aw2NQnykoZ%xYgXcMCxYJs~1u*96g7)=FV|juo4HW$KNKGE?JMf zbfapI!sTaMeO5a#Ko?nYwVPu9dV$Kw*-_D{IX@sLgZ%DV0Whg&QC257ZVCXJAfxV= zt_-$dLejHQk%vHFUP_Lqrl;!~8hChl3uksY-}WLGy%Yz2h(GxC5XOuDA?c~uOaF3P z%-9+%O+&-9>$Y-dx}p?<>>6ovs@^u&PH}JzZdpbdNXMn7z{U1o<12rJ`x)IO5{#61 z-`%C1jr2XsKOv?J?8lsGcomyASIA2^GVip%;Fqoj?qv4YQ6MoXsTZ-pN5lI5jlQ}a z#(+HhJ%FdO84&CNImO3 z5S#h+OPzq^EXFT7S~_)(4z!Nz*J-aO+EYgLqXkjlX?5;4PpnMLmyLWc8If3Q?6fTN~`sbOR1EviB=UfrACqVzA`0owND-ghE6HJ3!&0a`JC8Q-B;bs*~8R&@s zRN8eHN8ol)&`FnjLcZB3cF7ZDAwCGHcso}{I$;MbId5(N~9>1^zl;YS$u9;O3uT0 z3Ud+(JKah9kUSz;til*2dI9@YbWBMV#HyCP#J2f;`MoUGA-w3dB+VKG&!gbnVGA*U zM}Q(c)=1h~>kzZ{pD?k_WI*Ulk|3GaM7C?pKl1b6Nlc85hPS$><>H}`feO?x7q3-T zuIc%-s%!}5sd>pEh;l%H5)7VA(jKUf?D%gb=WSaBM;X-AB^a?Q40ZPgnWD!Z<&I}( z$N21q;l;ntH?IyR$mq*HZ?Tf3;{#N0a$&!Buw*#Efqo8ir|op7Bv>=@QoRB(3UCLL zA4djV4m1Zt+@BvRpVd;p3x4yfF~EwCbwMzT1fg04J+iwg6jMb8O_eI2KK+yKQ&wI@ z%?S*6_k-d*el#%|J1Iba#6nQh^*c=6xyJ{ctM%r?`A7>9f;`nbTYimtRq%)6AHkf@wJ`O6!>Q z+r!%b0Hnpenl&pFKoQ>E*?F7kUQ`)79MbGto^9#9y2l*XEq9_?uc+oo@3 zYD!#Wzqj`(61>QvsplzhO<9nF8CO>t&Wm|9U z%MxupTV%=vh$vvA;^p5#cj(Q3e-HdWwc$dPMT z3SP#Q*%KTe-U4183)3pu^s2*Fh#*qyBfb2?Gf4|iE=rQU(Z~}SIaMn|&*(X`nsL;S z#xWxvRc+6L+anMr_qkJjdN0QvETjZ5N40k= 
zzUK-}cW7xGbD)5OTJ9rs5f1xvoA>+89gALRJFDFwOXIYV&D3=IYCzWi>d89Wkaa7$ zNKBI=Ws7-ht8bd&_1c)u8okZcH%W=`=O>>&bHwXbswWDAU>6f3RhLbkEqmbWCJqCZFx+6@y6UGAw)P(6pP)0B{?C1g3Vi zMR3o8Cq-((PQYyqFPYPoI~=7llY>DTV~uKz20 zzQm9%ZL|Jg&>`$2;Un*;RqAAutb6--d^aI#)E|r1VL0>KJ5TY`7(Ird_kEF|n}Sed z>^S}5aQpH#$!0vG#QaD0n!Lh;4CtwFbZ4GL(VygGMxf|KFZX)C2Jka~*7*eP=`DZB zC8?tRs_>RWTOIPD0gYQN zwacS#T(m(=AK(zri>C-z-{kz82qP$k%nQ+h^7oL_Zuszy5GU!>GvZd z*5ToX+hMg$9H{X+gYx2nm>Sx|iJH5|cp)~s91C)Q;3kWgDbpf_Z~WC4z_+?;YH@$` zp+H;(oJa33+%i;AYikC`P)KY{WB2S%H^58R3iH$0wV{{A(frm7PZ;rybOcM4dwaAOFvV6MaMW^5S4o8 z(xgXP$~QMH(QD=Nfl6*k-E*7D-kjZZF5Z%x^5+lIJH6DhVEwO{5y`_MV+Oajc6N6D ztHw|7o&haAAY>>KTk=*o1ay8k7PNl2MUcC1F14}{z6k-WMaP;cioJqw>)QF>hu_?- z3iWw-wCXhe9;9h>rGFV{${2+#F%Kf?KOHp~x2Sa=PQw-wj@Qkb;>;hrr(h6JDf0So>0Eup zx$f=J4p(o#)B3me<3eb8=d$Td`l!@6pPF7+y@rvbe0qUE`fqJ<5C#*b*Y7X4W6Gf; z?DXtAf$$FK+<6ha$(XX>oD(qsCD8dopz)Pi3yvBfT1e7|XWfabtt@Kn$*U-yfYz1^ znh>PGtcRn((DaXZ$Dl>J^N3^L?tN-4qD^H&6_+*D5a~z?QRMsc&)Mex@n_AxAxLom zp%AJ`3rG@ZB5P=vwRwT$(DdmYF6^4{_({#QF%8MHeU>}`c}<-Ak1(i%-Uix-82mTc zfSh)$Yhd7TFBg5#(pqwjylfDeIy$2kth8SCzo5bIiEz@FnPaEM&zVS&E4(?dAp#Ln z!!!FoCZBe-MjXv0(}a+S%1B>UIKWUMr>7s8m1bWcKgS-aoZJhk8XB{J?!@T!8ZUju{3+O@r4fJZbpGx6p@mbgJCIfZNII{)}Scu&Lg%oaOe zW9+e-8JjVONIq1-Ve>0v&#rOyy&Mwc=P+R0?zkElh!o6-_~6YwQqAC5sAgv`@}8h! zWNeD7ElmDuK!SebknnDX>ph(90DA{`ocqb%%767x-v_`yHWM#AKRw859(FB z;{R1QO(6UTIRsI$hb!$5JpO>$inyzZogx0nB&?oev?<2l77`h8FjQlh-jNeot4siV zL?FDmx$P(?cUD?Dx%j~FEOhMQWXkPAA-wb3>{MvUG7PiRGYm-GyFwPK_Sxl^l8x3UZF%^ z#H|PTAmj<5A?pV}8C?6Ezd9{CGmbdS$9)!Yt&Yxe{fma@Z!f+@SnsH|lx%9Mk;?AE zc|H5zZ!Z}xDK$^hxHr`wq}6Zdp4@gXr+!5BPyAw%;CoTr#ApuCorAPQg@>($_LYVu zhXzR(YvrP(nlXF4u94gR@hsXI!kStq(d+>eYsX+F+~~?7#cfEbxo*45lwIYZ3Sa#6 z0y+x+9=2fQw6%Rnp<#|RuOvB}U?eZ+aBerAHi*hj zoo9DH%C-DL>`b`4bUCI=NKM^OR0go;!otGR(!M}60!j1p=g$d4hgJtPa+$8r9CvrD zEvqzAPC%7QXsnE^HiXoc#tg>cZD+mOn+Mgq<+JVaX26zK*%*tmI={{*ImgSbYy2YZ z;Nizo_rAVC*op3Jgy+<$mtrxZ8HfFRwv)_Yt^kU=`@N*A(u-MBJ2g@$z7yr=7~QD| zDwZt3S*KvzpoP8A9%Q}(v7O2L z#9Nzhu>`y8Cb#jk0`a)&vS4Y%jq2Yy>GmQR^ySLNiIuKR89&&z_A&OLgLwsx=Q!V$C~27xR-#xwP%<*xg-TO>uE=NVZr{30OAksDJvz zdazYnD=jKYiK^Ska&{s6wpG?uB#n{2qK;4fAZ651s+%;qPG_us-jsSIWX#@N3AV%0 zY^B!q=LLcA;HVkIf_9XGyV(AGe_>kyuJ7&L?z|2`1)`TTdNmXoV{`B_?Q?Y>ktQ@;aS6Il5+wMlgswh zxw-Lw8IY>1_3VgFb6#_Io76hBo6w1LJJW~!zxz(w$*)2P+>|Ub)c0bDEF~TvK{t?8 z6)ylRfICr!F-u1yn{vz+%fmmd&qkF+wX~A{=xf+m4$hmrG>PHSG_2F2r>8G)56@a$ zuQA;ZHzRNuv!4dq$>dzB`O((P#JO3bpYtW#vMAddDqQ_2iB;RXQZba1Pa|FJ4R*yW zlq@65^I2w@)j7>wUq1;V0ASEE(`ay35ZTSmjlhfEsy&^t4?bz^VAbMS^sH+&i$j}e z(=6cIvI{k-0kllJL~uF2)bOe>Gi6MT|QbGtBgd-yrYeW?`v$@?2tVX#;2&i?5Da zww$V}s!wQbSw2 z%h7y&LMGEhhtc!hWSnGMTANq3q4_zxZWlNZebKb7l1F$yHB)W4l5LinsJA{h+EJ_A zeZbI11fI7YjkZ!_ZiuNDn|Uk>bEy9QQ7iqY9Ig{a%&>$D6lv~|C;73Mt%_V zxxgmU&)Vz!xFR_G3EE;kX@J6?q_rj|Q{?M-EPSrhsO_G9O)9RbmJdIT-0;*r&+)`R z51zgP#=&GH&GYusmQx_4B;_VBj;!{*o0-`=ZU?Im$Y$goQKYG+R*n)G*i8i!Wme-q zz762uNFHVznn@|eRO~Te&urkC8WnZR*3ZpoCmBM;s+x!+OY)spKA5o`U87({Zw)1H zPo$}6YcDJ>3$o;JpJ8SAR?yE(Pg*~-DTY<)aA6BqXhX(hLVhJ!pss6{Spe0Cr!^c zf;9v@r|@oIRZ6;b2nNDHyK(L~h1Wi>yu7@EwNnpN=6s)Oq{NzAsiwKSjp_K3b67cT z#G9^Gh3Z6 z%ES<%Cqd`{UA|ki$%G*$A)t#vL`_ZmN@bSAM{7h)_v40$V#k|`dY#Qn-es#hz%OL` z{@}fyAtZWHC+Sx5>jXnmQC0J0s)(`U)R)#lDfhAYim|l%A|G`IpILpbvcpwtK{r|iEuf0p1s)a{Q#^^UteaUcM7=s zaj;A^c&)B}RqO>V2@BNRLZ#5x+Wl)a(hu5I)w$8Ml7-L)C%Nm6QHA@e8lJd=I&mIh z)59u)l)70y@2%lfhI*b3MFkdlsSwL-mnXktrdC!C1*W+X34?e;s@Ub^HS0C|R=!;1 z7pXE*#+7(?n_D?Uc!7 z7253rE$!z;rkRTIf#kS?11o&U|WnG2%JO_D0vF z&au@A)z_}h)=qUzFp{lJH^~H7!s4Wnz@dr>*QEim;7qdg4#3msFnGITENe*>&3EISb`NpZz@ybf9 z9Bgl|AAU`$?DR$|CS?T%=GwNiel>T@6>mYA_oOrl5s*+w-q-+%S6!ZHTCW$5`q$mG 
zI>L|LjrQ9oGRUl5eN~J~HhOwwh;B`rTcIf>MO$w9#ksE0YX`y^6Kw|hEtc_CJJuI5 ztpjQm9n4$Yb#tjqGUATZaxy_JQLmiKohS|Al4R1xn1{!I=V759=rt}^L7oFl_#8Wf6L&lNuLi0!fBxGhQti zd&7DsE;Q7wYB}eJn(}DMDiJ@vI6giGc^btDV9J;-x40rhcgT4;bN`sITffYk2&lm? z?HXCI3N}1TdexLC;vY6PrK1pGqwehJBzlkG?*9ayv`vzWUaPFH9-@8$3^fcw*gTyr^urxAz||3fAfT!a_v2slL9xnVHl+T2pp5rHX=_Pq@k4 z6~kVsjN-O`6FPA26P7VVXG0!}Ay26qCu@HEF#bvp;XI=OSfJ;@Ro)9SCc-&f&-H-j z8;pp**F{C!DcBgJAP}t1E|HCXCL<+yHYrcNs*7|8Au%jb_7jhod187KhD zERrMY!;ZYE{sQWyqod=GqrzvSibP^E7W9KeF~-dz%5R$LI;`z`7wP?3Q_Dg);#FV} z`S|hM7<(h%d*zkFmaEgoY$wO~#2wv^+pfa6Fd3?e(=7bb^Xa&R27Vj_Rz7d73``Qf z$+59WK8w4@n5HW-DbH=m#P~QQv!%4CVnX24Utj@dJDz5C$;Q7SsoRnpu6zN;>(`G*&9buYPxVM*CTZ@g`sY@4Mh-FZEgCMG-M(Ww!>=QHZxun>Wy zMIpKA=5ys_4o`&!J5l52&pPS@UfsyXn}T1QQ8Y7FF8Aj{>Pd+-$84_-@nSud@qOVf zISV@N$5Xu3yy1^yCkC}E^nIsZFSAsMFS0f4$;LAq+fee@+S(Q~Oa~9NJi~)Jld@cO zmE!>3%Hh3Xcm4f!Ul)t!U&4&yH2X5E&m)=0fPk3)-K4sb{5I8fb1w>Dfk3=tZuKdJ zfcnhLPMT9djYF#aI#__m}uLl#@It(Hf z^(|$c7mV`#NWWnJN?D}+Sh`9-dU!ns(_4}QX6xZ3InluDX?ku;hvFeCM;X?Cf6Ay` z_p$j8UZ2;u8Jh~w9$vk@O6%(H8I7_UA4WM4+eIyqg=DJe!lR27`xc|>)kH2{#9r=xp(LXg}{In z6CFVaPYhUYxXQ0zzx18?@LJ!D*@jN&*37@A^%UtK1Iun@5%$Sbamb{xz-X%e{%gPY zZO_XUuY-G6FV%FeAV*-QOG<}(IPmS?{rB9X(_~xm8Hxo^*sj|Nov=`1ttv)WTjQ=IGaYK7i29KD=1~ z2OkpI`n%X&M(qV$9Ok+I?bybPpQ9lU3IgzjfH(_f!!=61O&%mr#ZV5eYxC-o=RHB> z^A2p4==k>Pjby4C$1{}cYnJEm7c<5>mwG#t$ypCjmKTzqF2J!kSTE;5_)8;L@}LCZ zd8Y-3nkZ}VbGys}Yim-G0B%4u1{d1X)wO?_J*am#Ur%@4i#W1rCoY%~L|46F5@`jNFtetn=5kJ&hj+%> z9=DX4)G-=YyvhASmcY$Lqd&LtzdKh;34XO&-aAG2wcfHOVea9LpG0wD4U(tTQb!#< zhjNqMrYwP$7_46g1OIWGStT%TB< z4AZ5C4``BdhQ(_5@iVG3M9E=QU2DI(jABY_<<6)#zvfF2CJ!Qc-dvuEqhWf{iF#k} zKx>KDq+;VrWB1P`0&`oI)F#dG%NvZFE!Q+Vss6eFZ?MDF?Bq98#DQU6(;o1K1din9yPe}ZKTnX!r5mzT8dgZVJ~F4A^i(`(vt*?bFiGr--ehThaFdW<+MkU$XqJtj3($B8n7Ry8P*-C6$1 zwi4dHZ3+QxwxUVgBTI;SJM&7(6S}u0H^RtGh#;blO)jbGNZakX`+0!Vk;85E?b_AA zXs;ABvA(Se#-Sbu$ z$!iJxEI-wWfwNdG2UyAc-oZ^6MYMdJL-W8hvtz=U?>)*O{aOyk;{H?SCSz0QK^G=o ziUur7FPfQL2yYP59kuK$FTwW%I2n&F@lw~gfUD3fTG0kl@FY|yEsdt0K2w10dHsSo zMS3(Oeum_~Pud)a|C+MZn-gu9pLWmPP0h^8DDR%|{Eb>I*ABWHXP9{P8~V|RkdO%& zVG0XS6l-0Z6yeSTa4}A#nYOiXJ64M{H*&O`dDCHjzCgcHDGG(z3gFPzsVUP*F=izYy zFio*~`LfqPAX&NgKr5f(alsF*KNTu%H)g$w0JVjk7g)>S(njGsPT&V70K^6q|x z38U{a=mb?AZ2J_Qsy!vB!K>d6SDmHB#S0+S>I%WPK{GZpgI*r513mh_zP`7@3Z}7B zs+>fBPyVFbI7H7GsymL&rc(om*VI+xZixqobogoya{GRdiQ4;I^}P*_y0fGDtVCe%jGdDB)Qz<|jx91^Vbtj@0mb3~eD=D+Ee5NVj;5)8gmw?Zx@w<&= zCJ7cCEX67GaNow}DGF$XF^6%zxX`c0hFB=q5+S@dZ(q}FmUG>%&HP*@xK@zIMfP;p zqf&S&*KB_UQ*9wd{73=>!oke0`;BM?Ysr3aQotZHo@*`&Vae*2TaV%i!9OPL;!AP0 zd2X$`WDJzfANis?;#wK1vPJ4(HCk5i!$h#RTxo- z9&St_=NhR}{JHTnvc03MI@whKDS=|_YPQm_p0*E27`6bi*=U^vI7gqsCLFUIuhIB^ zvoZYDyh5WCD4R|Sp#AFBN{#$DVlhx`S>Gde*uKgzs&`y6W?6}Bo5ifx)^c5Ft7d&K zU#k${b*LmEd}n90A7@3RMd6_~tE~;iAr!X6sjMw7W{4^sHpF!Vf)m&o`4NEsgN<$J z)R@<$;}27~(Q~U87c))G1Wm% zTB+SHlxx*mP89xbG6i>|fHs%iO4XaB+x@mJSIG^9(CeXL>c+Vidc@9e4FAB-egPW< z0!o5UM<@D0Oe`Kc%{9r7YZ^Au03(1_VlPW<}4sqG4HeDu``Pb!>Ks8sB}vv=i7c{S8k6)A}A z*|TTmZP&2lAo~UN*81$_qoZ$t)`>{? 
z^jU7M@mR=oFB&w|H2EWq^5A~i3O&#ncV_gqw$Icem<# zpr3$3wcVM{&?q!528R-apVY!ng5Cmyv$daK2Y3wsP!VW_pXkXOmeur``3^5GV%bv@ zQ+g_9=#_J7GkU)pbtVk$K6(vBQ+Xm#m9id)U**AGLzoTjy=nDZm}y_!v@{P}=a0ZX z=|`Nt5=bL1utC=&gfJ*XaWW8X@U}N5g-z?t#5;{1&@wX{=`MaeTUw_5ezNJECd4ig zvI85&@OQTSVqix~k!V{S8M4R~rL;G+R!g z{EjeOa49V-d(nF%_>)$x_O^@+pF1*tP^l?%r-w*>IhKtpBP(sYug+04tl`tS-@fub z|MMIqk#pXbFE2DJ!PzU!3l@DzUPMhr^|{T1ceH=~V#Qm}Xc`C?{91hBvikQ27@BYh zY&$TkFWe?Oe7ZH>3O`vsdH$j(%~4EHcxE>lltOy%F-4S=qWZAc)}GH?Fv#oa@l+*a zq)UtKw>Km-x@WlD5hbyPi8Vmn2?E~;tGI)yEKdj(7Tjl9k1_7&zm}uuG`El|>aGAztALZ!|kIu;hw5wubVtDPh# z*-MPkYi`KO^)Bth@x#yst+acd_tnrKKnmQqvvc)sY?r&q;?&Q~pS1dYGG+Vr4N->{;~0|ro<%S(l8I6^KOp-;JKIYii_SY5stA#QL_aF1X2aa zY9z@#C%0+>-7})+nTGR7MQ0+^z`kGaK3eqoTeEG_nZE3`@mGjCg!6U|jll^Mm%mM& z?)zE6lb`A~wj1Tl8~>1kbTI-u9`vul0*9TAX{&8{b2teT(TzI^BVGgO@E%?-07X3(FoA%CBDL+&NSV4a|tW=%+9NEFtMQT!4s~z2Fkd1n0cVmWLRP`U83n# zwmoN>j&4Q?G*ysFF$)U|!B+ZEP*$u{&Gag4)SN{tc}OBDotBx7uFs(U$4yoKJ;8R; zwj>LX3aI@uP+9x9d9lBjKQQjB9F8RD=bWbn1mwuZ=S<}1W=rO_y5|ndODpEgYtgYN ztSut4@>W$L^8`2Z;%R&a2b!!$eeUi~ZcvL0jek|~HO zV=A>v{Oi_o;?!%W9gEWG`>e%N+nExgcZyWUNH-6bm} zMGn5UhPI;Gsfvqa+Q7VZU4I=&jH>HJDFg-E<}&oANXuTfUWzA6M{*7|C7FJ|b)q;m zR#r_{%YiLD=|X!jxpI$Z7pu76*_Zr_U*Z{3aoB8sUixpQ7;;fg>|IOLC`ANN&gp%eC z#$wI4Clj)Yii%i;@Konz0!J$fEVOx%7O+aT+*LU_R1I)OVt%Jka9gR}Mu7sR@xcOb7L)> zAFhynW)-kq69bYI&W5TA3JTv0(SlWX&aM@|GC>H3+z(s1BgfDK0|R@^Sb)P>&u44C z?UdIxo6tzLEGQ|;*$z7*;Vfnpy83Kp!CU1tKog&BWL0=^ETh*lTQr*v3PQBq{UlXG z<_cgq(3%1sX8*7gnlg5kxYTRk+VT++U4?WfG|fn}zX)t|F2t!dS7 zf~@gbbYJo%2RATQ9RlTKP9B~&X0$`uG)tI5w?`vD_@?W*xLMn`)xTga*8V3}!Z2hr zeG2nIlX!LO7jmk?)6B_=btSs3NpNB{ihv}hxTyS4bDUb$4I#Rqa%(H4~FtFi2Wd@%)F7&x*Aq z@>(f-4x~T8%Zp7UOlej1LH|^NSAz@tHwzh7UmayoAY}}8Zb#6(5!1i zH?ku{(i@wbdA)t##l^%pxD#Gp&$P??po&Cx!$Pah`ZAKT?49QwstPL$=^9v|*ucL) z{Kg7u2~O%Y?{>W88KXgXz4R`5ALymUU)vi@n!`fn7y3#5#E6x{Dzu30x|)JJ3&;0= zBsC87w-wOt>^YbofU;`h%`BVqmY}y+O3|z1cL-w)LPFkr$k6%~uVe&tfA+Ax?iP%Y zc5}<*+Tkw{?ZNvTHhT5@ zGn(Vt%+iLg`?A;Tt@an%t`LgVVPO14E~B1QSnJVC!xHrOn#Bh6`s@`%#{q=lL%bHZDbmS1h7hj0zpsK z|6hTilZ{<#P)kSDTvHQBL4bIh_RYjThGgl|rz~IlnZtSbvdoqhXO z@GmMFntG{LR~bT9FGG2Yox2;bg$M@2+n65bbEaT6z{vT*lB^#D>vh}xS-C^5uG)mC?~(K`q(Lzmk&pczB=! 
zKD|JhU81fID+5vN2N={ONh1(K4y_&p32${(RZG0<#gemV13F0irKP1ST|R-_=(RYn z=QSMs?i40SRD3ib?&o{w=jWif>~3&efT+djwBC(=tbN?(c?6`$(J%nlLHyR`aJdz5 zGr;c`M5m{dg*Ta|!*7RdyzD2bS#(>VOBhU~um9c<8rGvh0X4fI9Rjcg3mO^__={5@ z79W~$+brX*UVcFY)ob-z86&gE9086l_l5-5tw92CQ8V26Hl8xwR+w_~aRt2O`kx;r zct!#*@L#|F0Sgex9($85xdCJz*P=!Do5Va=!Po7ywiovH_BJ-oy?mIfHYLN$nA&<2 zmUy~lv**CK&{EZKtMdjC*?AOJQr=R4rEZ*E~gH{YOb&JyFDFi=(u;iYIOKFnQf zmA`#w5_|@fC3L{jwzU8B29vF)wup|s-YxG=a*I|*t_l2lf~N1WMzR5dK%9}1$IU1w z7j}^ZrW|f<8_Ll{Uz~sR@QK~p+A6elpm~+im4=J0d1o*=;h*glkSgT8tgOGE`7XGr zr5*?gh3oh|MYwm-@WDG|HE45@B)r11wPNv_0)#hjpEzSpo#e}ie8u0*0i6p#KvBbT zBY71p6OS%JH*HxvEolbWbqB5Tt-6QaYynbU%OqT;Mn;c$Vk#?j{21R`I8!#fmf$p3o{+d z2xQZ?J-y)XJC>)&Eqw66%o_l1(+OP0DR^dF=ea%A2NF2wjM8EsRA`siIu|nd9>CLq z4Mq;FBCrO}=y`im>|B-BFQ4dY18b4->=NWb%U>TO$Txo_Dhd*L|C1VtY{dzCb@liQc5oZJ7iF~A%sNdu!xwFs zlDB%TVcO+0_Pq$9EFkcmkhaedygd><{JtI&6B8kH&l7a^9!w@&PKNuD-I52zUbZ;O zk03DFRcLuCDk-tUOlNy3ggAw7UN)sACKe8FgAYu4;sC6NOAnFb7f#_2(~Hw3)jNrB z88d`?J09W>$ZCtn=R?$e0Rzn)XWg;Yp1G zCMmKR!Wdb`ND6QEGmD*p~?d|CZ==!e)lQ?%L3zgTq)sly{T%|yp@#qD){Ck0( zgABjbr=w|DB~G1>*whjk4kDqj3X0hdCW%1-{sU=RflzRLc}q)+4^oZPvIQF)1Ur+>1>kx#A52ZRja0cJ z_JK=Fka?2lPbVy^+cgT4i+gj+mggb~D8tmdGP zf<>>5SYS*O`C47q)fgC^mk0^mHoCxSHbjxRioQkYOw+730i#=a2**!>Px@+M!9xKY zVi2N4w(Tn$yvLTlvnh2l_dPZYWbpq6(x98(-w>!g)C2dM9u@U>7!7#flA@9lxP43P zGl^~Bv_NLy^e7>e_Z=wA0j5L^fA z$H!1y-Znov3Z{hvH93dKU%x(SYR)Y!d0rl^8uo;P(ULF#Tmx4osCfn8p2y(%Ck}0= z!Og|tO2wBB2k0aL_|T&n;Go6ChWq+fb%nv}{|7%=azO4?k!7t1%*Fv@^c3-j2M6B+ z9~nt0ISb4WLPDPPQq$R$S=r}CB{j9J>Z3flAn-HBgZKo24KD+e znCH`tk9_{_!2#Zm<{+5c(Z)Ei`~rA`i;IiHrDpC?Q*2CZhWGC0jQqslPn6#Z+sxPW znYIN@8r%cUE#Rcn-ZO$ni-7R)7=a0h1qSJ%V_>*#4JLD!*VQ>0nM-Jl6e~KLLVP*BJ#& zOw5o(m<^zo@mKxkb>E`e3k zT>y%4on&j(8er0xOM@xFG55hjeZ=GAN5FYL`Zqt_i{5Lu(4Ukb{>HPdruG5?+7(yFTyra34@OUi38Uc^lxx-0MFFk>-ssV7BH_OD7vhSZ!u({7T6?1_5mSh4)13?5ULtx1|^yR39n&7v8 zJ{Q@~U%coN<>TY)#uznVt2LS5mY$L@8}R^c;M-pHY^G>nr-wrRWEEqv?P4>MHr5AC zxj|TjpPwHkw+4^>R%pUu>kzoCF!WJXEka9gYDQhiu)<2p%2R-TU;B2BR`>>2 z*L@o3ZY!SkfH_6smM_$e1vBpTO0!5^?H}EHrw4XFFEzLjmWR(hG&`c3c{L~^0zYxe zo?-cs0RCzSv=wm(hYT*w%xv2)q|=WglsduLoXDm?JewZTSIq_fmOKHWv~DJ8Xe$uK zOck6}aA2<Xs^#T8VNb(MJK++ibfYkibG(uso7aSnE$og$Oo}^3r?TFW;OXX|j*o zj{F}&Y4;{L!|n&$?ixPlQgU*jVy9&249?{2fAQkrd++-eh5At#jVl`TCVhpBTNoUs z5Hc_|nx~tLAx#Q>^eq7iuo-~7!UEEmV$a{Mm^o`W|M`+Sf}6$qe5dkN5?63XFi&@2 zgR`xpoPF*mMjmSRbQ33;(@ruG0nba+N{f^rX{zmXl5C30#u^WpJK=XvYNWbhto}f& z^X$OP)yPLJkaElnvW&@{JY?}Dz%+b*b`K|GiuC>U%}uK*?Cd&{bCg0@HeL2$Cyfd{ zh!zWRgb9~CmdIuv9TGpnxN3LbDhyXe>Kp`ca;qfr&!x20;da-MnfWYM4 zV}PUMNg;3pS7WR47iCJp8W^ZbNl9T;Dg@n{A(^f2HJ@%0n(ep4HAKPea;@sIsj1w! 
zpr8$Uli+PWm!k~6SHa{_@OOyw9G>*Cx{Lx3SFVQ(_mgRm&hDU5#yT;}2$$!Mzi<@@ z>Yv`hw!i>uIm@C;MMj!?i!+7u##i8e5F{{1DS_PyXT$|(Qyf9nUG3Z!gzAUMPrDvk z18>x{Yk_>^l#?~6_@}Eqv37R8&_KM#RPJxjlX9ot`NQL9@WUeG!s&2XApLt7(9d&3 z7PFyfh+w37{I(u|)3$#G2=)vECmpC8IDga`*9mS%M6KTA&8cr}i3Vc05=M9$6id~E zXaMM_7dT(}&spNxciw^%TCm?~@T9Z)U2qojzXP&s%l!@{IR1pI^eaT3xh3kLCN^4y zv_-&7yC7dCrLYJ&D_)>vM(CUfYXU9C2w2JIGV5q&j!i7meiVMPN)+xOxkErUd&qB& zauAEMm~#d%$%WA%=+fhY(*1_CU{C@eri?m>M>SE=#kPX>TflSL_QjP>kIboK3F`yz zgHrt6yC^{#X~K&HM(`2=asJR;BML)C>8<;tLqdl7-DDjIw ziqlQV?fWhAa^_V>g1zaW5QR$B_c0OzydR{}nO$T%CPgqdM>I;Mj}Dv^FoNZ|S<{t% zxrx0sSVgRiM5Lr)S*3g`YWt}F`!!*-_}+$?JeY$9_zn}Z6Thg=#vH-r5D=5h7yrD7 z5PZ{u1zsVWZx+8XTVGFk%oj(IfByk zM*offHbjPk=c5t@*`oyS>W3nD>P0=?es(%|iC~BZm5rM0g3^M2|DHv>j-%R-&apgy oG%l>n^U04p?*A=66Jrl(@Sh)_O?|nOdi>7P;__l;BKqI|FPAPahX4Qo diff --git a/g3doc/tutorial/images/pwl_calibration_distance.png b/g3doc/tutorial/images/pwl_calibration_distance.png deleted file mode 100644 index 4bcc33654f3ba1af8d9191215f3784bb446d36dc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 20211 zcmdSBcR1F4_&qc>aGLhvT?!7oX32ocndY&Z6&VD3cH}5TZ~hl3OZhZ4?R{gF<1wIDZa4 zVQU>)ga6>V%HPsG4_|)g??=JU7u*yL-Egx2=U!cT}+{7C#0>;W=r^)dYnY(o*2MU8!B+ zE4<9hS5sTl9!WyctoVeGm#xC$jusXLxja#@tjc=d&o_aAVQB2d*{;-%vHIm{RhK{A zbv?2wYjr)|J(wyJ0lFyUYebgM0}Bmbv;OoP(eOb*_y70)J9!@OP9n1{~vG?L`zy25%muj62|?+UJh)+1CHKRHIOo(db##sF8T{ zw%Jvq?HlO{2|w6fg{rHf<+D+!^!AEr*7al&ua(C7n*t}hU#hbxw|VTw^;mjoF7xq) zXyg~38u4r?&CShi?CwUY=26yC=XeM)#3rcb-61iQfJZD>8bHds;XA?|BqQE5( zraY(^`ONA@R9u{gW-yxh)$7-|mX?-x_>!!W<@+7<^z?o_ynddHj1GBI>9GABUTTL9 za!H<_-B$9)y%ME!znvm?NIev8AeXb+M1Cnmc`fSkzfFnA$a`P)#_AN{ou9SMTa(;PwUyKG3+j1AIX_s^IZ$0Kbpym6Prqc3sd1G#&9fRUNyU3 zqugxxZ58yJMR~VsWV`){=7@(KuCS-Ad79l}$=H{4R#Q2a!$2NZ=@*VrA#gF zjmP(nnZp2CyI&GM?Jcn*55a!mByKw7n!Y>3jl7|ie36}tvf>lyUc2vAv`<0}dG&ls z>4|^{JcZ7TV*eRB)_;?plE9+i!^}{%YH263uq~y+YkUI2=h^0iBQa0Q^3;*Peail` zX`@aBe!X>$7{l^L0G;{mZAf;bfXmsf#NLbAxoXgY$01*cLiq&_vHhQqW$hnPQBYv= z;V)#}*V)l~!eY3ZC!-_Zl=NiZrUPnG5*;cmFMWOyX^fny&L(M=t z>jlLA+MAFc84vp-+CwkU6qJf9zK?)yEO@$<61{r!S3_5xJ+bkR({#3hCcD5Gt3eC? z^uLDeD1yIKwUWreElr|#?(p;&ST0RdeGs!XVAHfVu$-5ZnwWSQFLB~ceF=Yu!$1XY z>kHhL!si1m5yH;mSm{4*Sb`-I@$VQS(+c4=9PdCwgEi*$Ialo;l-&a-iz&ZXlADL2I54GVFW3}l-dB&G2od$5h7W8A&t z+SPvUg#6jwSq@;jv+LuI$QB z*k!vi>@tci=40vRTuC=)mv#Xf^W(WdrsVyM&dS8_k!JMnbCtSuW!ra-LOgb6vy zA(WM*#1yq?ILJ~nJ;&#vX2zxjpLW!}a2(b*)FNI*#`bJ4x>V&|>|d!2AUAE(>QO=G zJ5y#-WfJ>aM&eotgP_0HL^lom4pU+gwWE!%k&40SjIX5Jo9Dy4xI6Xc39`dv5Zc?a z{Ts!M20v_J8^{;)Y>U3@$97)&;DGUvA0>tFI6J|Lb2LKOf2#de)vfSZ+0S7p zI>4eec9KG?^p1l=UZuSFIG}=*p{fnn(Sj$6AHAIeEsZ&9p8q7b zqpMYs=Y%W+9tZft_03j;v{G=t=E%4=+)K1_7B3;xyP;8leSd+4HOfM-DbOeym;Q%o zEY|pd*TDqtiiD;7x01QN>0}pCrdV^kk8uNknum{;Xa!BaIM`6Uc>MkGPU-NyTp>sK z+i7O3AWd(9h;|J&^p&&#I|$@7uV_{Lzzb}^$eTt{X+`_DG@Dk0+2!#C%>^VH3sADv(@DgWA{&b6t=ionzF zNnYR$?D=V4Shm48yy8waR9-I;B42y{x|FcBLUPVSB?W~$B{3#!ZBw-Y*Bm8IZTF5c=OTZ9l~vJb=Nsh~v_s zOqwa2`)Z6ZnP_h=jKci%U7H|JcR__H#X`?-V=z5&oCuXAC!7-f!Fn01AX;CI6w7y? 
zW|}e-Q?9LTsf#13)4$;PI-dt#8$~j?A)AO zp0?%Q#uUBq{q_el6Ta(G=@Opr8-ya8g0mGOt;VXHoy0S|4i@vLT#hpX7o_{&Uh)9I;BA zM@L8G z`>vY%Z02N?0}EAH&yOx|x#WUqYrm0n-}j1P`{eTUJnc&>9=>H+o(n(N3kGZ<6D&SS z(JxdyJvm+;9Yyl%)>uX%&$Th7tR{Pr+^VY0B$2{_Co-q|J?&jx_MM%J*BxpIhsb%u z-~NotW7M2p%f`AANa@W5A{rYrX}>RAs>W_x`R$vkVX?jdal+uc_wOUn9CDDn=Eke# zX>(n?=-zGuC!`_7VJ>7DdW38Czs)7IEzF;xy91Porj_H@^zSw`yr}%Az4{j zLwV%oX)RX4_4P9Sw&loAG2;%Ax4G?vJ>9sn2rx#)v?6IFNl8f{3lu7}0hOUKI6KcI z=7Nj*k*}F`t}UKLim+HeW^Zq#wq)<{5Jxt2e1xu0BJkrSnUg)v#-=9YQ!DXQ{MLx6 zzO2i4>_1HP`RntiWU_hO>L)JpTA3ZsP^t|MQ?BKW3S}SaMM~3#1Ilx7}gjB1O?lE6&q`5 zX<^ll$MyH?a&d7vxvEQaJfM^mul9K0*}0}Yn{0$Wa~MX`*VrjhZ#1*wj*k5J&Fbiv z2TR)aHfJy?Dd$l`<#u$WOrm#W{>^qI78DgVe+|YF^vwj@%7LvIR@h%K^V?KRxy?Q1 zyZ(92=R(HM)lhepIFhiw#l|$_?f=qVBR1z71ECiyc`sdRSm@1V#5FYkyDh|-=`}-) zn#@)FoWRtXWa{|r#fym9+4~h`9fc39t>trjKV+zzU6znwBn-#y?d=WenbWz9nH_dm zp?YdpgNKEJ07&zn6CqXL@_t^gSvX$wX+4ZUS@<&gEd71iNPdeY&xY<|gn2F=s_vh2 zTsx9IYU$8(bj(N!U8L0}aN6>t@nHTLbLQ=Cgs?x8)R4cAYBgmgL>X3q&*9k(K;kMx zgvyr6Hg~JMKq6axf5>B*L2m*k-Ry<|Me-xROvx${jl~GJ>7Ei&a03YTdqv?#zFO zFv`z@DDh)jLbP&~23Q%Te6G8RGkdHIvG45k6R6~Y23>;qu{tmOzcmP#3h^g87LJRQ1n=IxV_>?* z5Zm6~E_>%r!jC>3o~^;!X%woKbkTXsPmd@UT(XKvNMw634*vAYaGBR!k^u@eTItx9 zF6L?>oa(e%K8T!zWAJbPgrAbVb$Y_ZhdRY2OPUu^}+_Whnsf+fwmh}C$kKDngxMwTyWx*fy0 zvAylu*56Mc1uFNOS!D9I*$wT0D{bv|?pDd*YiC_?9c||S{IU8~bk}R?ZN5e2C>E+S zS+EUlx4M%z6BWlFZHx}db&f+SpMTSw1XHkT(qyOH7pt`QY_hihqC)>Ev zLDX;W1M*m2AFor-6$(^Pk_4eex!74&(O!9;g`-&iGK82V1NLQb+M*dp4VtSrUEQd4 zp&ClFH>@mEIjd~^o=h^@9qcTIhT@Y=XPqALKrqEeiF&Ni1Eg~G;SaX#{CthuG;B>p zLRy9=k63>BnMMKLppfg*X(ogeXFPkVvmo79`h}&Azom;zswp7=H)l%u!Y4vD-blKuT0;#c8SC)+mbyAk4erwHYs$&qTcuvMirgNM zTHsVFv$G4!o+SX{HyV1P{Xl6GGBdwt3~?1u1pdeY50x%3d0Mf-@ej5kPvv(}`0`~U z*ok2Mk+7TiL}pyr_U@z2l)qUzds9(ZP^V|x(B=KN2cwV@w34DCJ4oKZ-2VoI8Now^ zey#V5v|-D>b|_Sde>&$GVv&(H$?o#)`6W^JCF+2aeH&2hrH?Xb=Edks&Zr>HR2sLO?XOoH zn;?l#;L)S4-94C0n*2l24zmOKx{vOCB$N10I#Yvd5W< zUxW_zYDC*N_UAhWe9{Lg&b&=ew-sq6MK0-gQG$!9dpS9eesA`fVPIfDMML8p3c)Bjr`Go;$vH>R3E zTI4=HTXN6z9hzTkU_XNeR=L4^zy!|cX%m1CU+l|ovM!tYk*xrEaEi#|m&mrB5N_HH zRJ}q)0P2+<*iCf`& z=L2V{jbAcX{#mwKxE@e7!)>+(Xjefou6eY{7e6tLbkX1sA4XXFJ32a2B)x?}l20lp zeAE^eJCvnF#NH3LC7lTC8Z&tf@!uJhlE17$8=#JhCqQgUo` z>|9IcEz!@ENrrAyFJH%cTUv8C)QjNu_$T0f9*(n_kccVSwJY5VMng?oIdBVJ5{qkR z139TmUb&_qMrBFng=$_x$Ni|-*tv0A6}Ou^1g$SxJ`1fdTtQjvQsBIi^kh2Je2IP! 
diff --git a/g3doc/tutorial/index.md b/g3doc/tutorial/index.md
deleted file mode 100644
--- a/g3doc/tutorial/index.md
+++ /dev/null
-# TensorFlow Lattice: Lattice modeling in TensorFlow
-
-__TensorFlow Lattice__ is a library that implements fast-to-evaluate and
-interpretable (optionally monotonic) lattice-based models, also known as
-__interpolated look-up tables__. It includes a collection of [TensorFlow
-Lattice Estimators](#tensorflow-lattice-estimators-walk-through), which you can
-use like any [TensorFlow
-Estimator](https://www.tensorflow.org/guide/estimators), and it also includes
-lattices and piecewise linear calibration as layers that can be composed into
-custom models.
-
-Note that __TensorFlow Lattice is not an official Google product__.
-
-[TOC]
-
--------------------------------------------------------------------------------
-
-## Concepts
-
-This section is a simplified version of the description in [Monotonic
-Calibrated Interpolated Look-Up Tables](http://jmlr.org/papers/v17/15-243.html).
-
-### Lattices
-
-A __lattice__ is an interpolated look-up table that can approximate arbitrary
-input-output relationships in your data. It overlays a regular grid on your
-input space, and it learns values for the output at the vertices of the grid.
-For a test point $$x$$, $$f(x)$$ is linearly interpolated from the lattice
-values surrounding $$x$$.
-
-<img src="images/2d_lattice.png">
-
-The simple example above is a function with just 2 features, and has 4
-parameters: $$0$$, $$0.2$$, $$0.4$$, and $$1$$, which are the function's values
-at the corners of the input space; the rest of the function is interpolated
-from these parameters.
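-For intuition, a 2×2 lattice is just bilinear interpolation over its 4 corner
-values. The sketch below is illustrative only: the assignment of the 4
-parameters to corners is made up, and `lattice_2d` is not a library function.
-
-```python
-def lattice_2d(x1, x2, corners):
-  """Bilinear interpolation over a 2x2 lattice.
-
-  corners[i][j] is the learned output at vertex (x1=i, x2=j); the inputs
-  are assumed to be already scaled to [0.0, 1.0].
-  """
-  return (corners[0][0] * (1 - x1) * (1 - x2) +
-          corners[1][0] * x1 * (1 - x2) +
-          corners[0][1] * (1 - x1) * x2 +
-          corners[1][1] * x1 * x2)
-
-# Hypothetical corner assignment of the 4 parameters shown above.
-corners = [[0.0, 0.2], [0.4, 1.0]]
-print(lattice_2d(0.5, 0.5, corners))  # 0.4, the mean of the 4 corner values
-```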
-The function $$f(x)$$ can capture non-linear interactions between features. You
-can think of the lattice parameters as the heights of poles set in the ground
-on a regular grid, and the resulting function is like cloth pulled tight
-against the four poles.
-
-With $$D$$ features, a regular lattice will have $$2^D$$ parameters. To fit a
-more flexible function, you can specify a finer-grained lattice over the
-feature space. Combined with an efficient $$O(D \log D)$$ interpolation,
-lattice regression gives you __fast evaluation times__ and __arbitrarily
-complex functions__.
-
-Lattice regression functions are continuous and piecewise infinitely
-differentiable, but they are generally not differentiable at the lattice
-vertices themselves. Still, they tend to be __very smooth__.
-
-### Calibration
-
-Let's say the preceding sample lattice represents a learned *user happiness*
-with a suggestion of a coffee shop. Furthermore, assume the following:
-
-* *feature 1* is a baseline coffee price.
-* *feature 2* is the distance to a local coffee shop.
-
-We want our model to learn user happiness with a coffee shop suggestion. The
-distance can range from 0 km to 30 km, and the baseline coffee price from $0
-to $20.
-
-TensorFlow Lattice models use __piecewise linear functions__ to calibrate (or
-_normalize_) your input features to the range accepted by the lattice: from
-$$0.0$$ to $$1.0$$ in the example lattice above.
-
-The following diagrams show examples of what could be the calibration of the
-distance and the baseline coffee price, using 10 keypoints each:
-
-<img src="images/pwl_calibration_distance.png"> <img src="images/pwl_calibration_price.png">
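-A piecewise-linear calibration is just linear interpolation between learned
-(input, output) keypoint pairs. A minimal sketch follows; the keypoint values
-below are made up for illustration, whereas in the library they are learned
-from data:
-
-```python
-import numpy as np
-
-# 10 illustrative keypoints calibrating distance in [0, 30] km to [0, 1];
-# decreasing, because happiness drops as distance grows.
-keypoints_in = np.linspace(0.0, 30.0, num=10)
-keypoints_out = np.array(
-    [1.0, 0.9, 0.75, 0.55, 0.4, 0.28, 0.18, 0.1, 0.05, 0.0])
-
-def calibrate(distance):
-  # np.interp linearly interpolates between the surrounding keypoints.
-  return np.interp(distance, keypoints_in, keypoints_out)
-
-print(calibrate(12.0))  # ~0.33, between the keypoints at 10.0 and 13.3 km
-```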
-
-All TensorFlow Lattice pre-made models (`Estimator`s) use calibration of the
-features: the input (the $$x$$ axis of the plots above) is set to the quantiles
-(so data will be roughly evenly distributed across the keypoints), and the
-output ($$y$$ axis) is learned along with the lattice(s).
-
-Notice that the calibration also handles the negative correlation of distance
-and _user happiness_.
-
-### Ensembles
-
-If you have $$D$$ features in a lattice, the number of parameters (vertices) of
-the lattice will be at least $$2^D$$. (To be precise, replace each 2 with the
-grid size used for that feature.) As you can see, lattices don't scale well
-with the number of features.
-
-TensorFlow Lattice offers ensembles of lattices to overcome this limitation.
-That is, several "tiny" lattices are combined (summed), so the model grows
-linearly in the number of features. The cost is still exponential in the
-number of features within each "tiny" lattice, but the number of features per
-lattice is typically configured to be small.
-
-The library provides two variations of these ensembles:
-
-* __Random Tiny Lattices__ (__RTL__ for short): an arbitrary number of
-  lattices of dimension $$D_l$$, each taking as input a random subset of
-  $$D_l$$ of the total $$D$$ input features.
-
-* __Ensembled Tiny Lattices__ (__ETL__ for short): as with RTL, an arbitrary
-  number of lattices of dimension $$D_l$$ is selected, but the inputs to these
-  lattices are linear combinations (initialized randomly) of all $$D$$ inputs.
-  It is more flexible than *RTL*, but less interpretable and may take longer
-  to train.
-
--------------------------------------------------------------------------------
-
-## Why TensorFlow Lattice?
-
-You can find a brief introduction to TensorFlow Lattice in [Google's Research
-Blog post](https://research.googleblog.com/).
-
-* __Interpretability__: the parameters of the model are the output values at
-  the vertices.
-
-* Powerful: __arbitrarily complex__ functions with __fast evaluation times__
-  (in comparison to some equivalent Deep Neural Networks, for instance).
-
-As shown in the following figure, in real-world usage, the training data is
-often a somewhat biased representation of where the model will be applied:
-
-<img src="images/data_dist.png">
-
-TensorFlow Lattice provides the following types of __"semantic
-regularization"__:
-
-* Lattice resolution: the number of vertices in your lattice allows control
-  over the flexibility of the functions that can be learned.
-
-* __Monotonicity__: you can specify that the output should only
-  increase/decrease with respect to an input. In our example, you may want to
-  specify that increased distance to a coffee shop should only decrease the
-  chances of the coffee shop being a good suggestion. (See the illustration
-  below.)
-
-* __Graph Laplacian__: outputs of the lattice/calibration vertices/keypoints
-  are regularized towards the values of their respective neighbors, so corners
-  (vertices) of the space that see less training data will fit snugly with
-  their neighbors.
-
-* __Torsion__: outputs of the lattice are regularized to prevent torsion
-  among the features. In other words, the model is regularized towards the
-  contributions of the features being independent of each other.
-
-<img src="images/mono_1_of_4.png"> <img src="images/mono_2_of_4.png">
-<img src="images/mono_3_of_4.png"> <img src="images/mono_4_of_4.png">
-
--------------------------------------------------------------------------------
-
-## TensorFlow Lattice Estimators Walk-through
-
-The TensorFlow Lattice library provides generic models formatted as pre-made
-[estimators](https://www.tensorflow.org/guide/estimators), which we hope will
-cover the typical use cases, or serve as examples for those creating their own
-models.
-
-This section provides a walk-through of how to use the pre-made estimators to
-train a classifier on the [Census income
-dataset](https://archive.ics.uci.edu/ml/datasets/Census+Income) using
-TensorFlow Lattice. The full code used in this section, which includes some
-more details, is in
-[examples/uci_census.py](https://github.com/tensorflow/lattice/blob/master/examples/uci_census.py).
-
-If you have trouble with the `tf.estimator` interface, consider going over the
-[TensorFlow Linear Model Tutorial](https://www.tensorflow.org/tutorials/wide).
-All of the data parsing and formatting is very similar.
-
-### UCI Census Income Dataset
-
-For this walk-through, we will use the [UCI Census Income
-Dataset](https://archive.ics.uci.edu/ml/datasets/Census+Income). You can
-download the CSV train and test files directly from these links:
-
-* [adult.data](https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data)
-* [adult.test](https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test)
-
-Please save the datasets into a temporary directory (for example,
-`/tmp/uci_census`) and change the `--test` and `--train` flags to point to the
-files when running the code that follows.
-
-The data is available as CSV, and we use the [pandas data analysis
-library](http://pandas.pydata.org/) (`pip install pandas` on most platforms,
-possibly requiring `sudo`) to make the parsing easy.
-
-The `tf.estimator` models use an input builder function, usually named
-`input_fn`, which is responsible for parsing the data and converting it into
-batches of `tf.Tensor`s (or `tf.SparseTensor`s).
-
-Our `input_fn` functions look like the following:
-
-```python
-import pandas as pd
-import tensorflow as tf
-import tensorflow_lattice as tfl
-
-flags = tf.flags
-FLAGS = flags.FLAGS
-
-flags.DEFINE_string("test", "/tmp/uci_census/adult.test", "Path to test file.")
-flags.DEFINE_string("train", "/tmp/uci_census/adult.data", "Path to train file.")
-
-CSV_COLUMNS = [
-    "age", "workclass", "fnlwgt", "education", "education_num",
-    "marital_status", "occupation", "relationship", "race", "gender",
-    "capital_gain", "capital_loss", "hours_per_week", "native_country",
-    "income_bracket"
-]
-
-def get_test_input_fn(batch_size, num_epochs, shuffle):
-  return get_input_fn(FLAGS.test, batch_size, num_epochs, shuffle)
-
-
-def get_train_input_fn(batch_size, num_epochs, shuffle):
-  return get_input_fn(FLAGS.train, batch_size, num_epochs, shuffle)
-
-
-def get_input_fn(file_path, batch_size, num_epochs, shuffle):
-  df_data = pd.read_csv(
-      tf.gfile.Open(file_path),
-      names=CSV_COLUMNS,
-      skipinitialspace=True,
-      engine="python",
-      skiprows=1)
-  df_data = df_data.dropna(how="any", axis=0)
-  labels = df_data["income_bracket"].apply(lambda x: ">50K" in x).astype(int)
-  return tf.estimator.inputs.pandas_input_fn(
-      x=df_data,
-      y=labels,
-      batch_size=batch_size,
-      shuffle=shuffle,
-      num_epochs=num_epochs,
-      num_threads=1)
-```
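-These builders are what later gets handed to the estimator's `train` and
-`evaluate` methods. A minimal sketch with illustrative batch sizes and epoch
-counts (the full example wraps this in `train()`/`evaluate()` helpers, and the
-`estimator` object is only created further below):
-
-```python
-estimator.train(input_fn=get_train_input_fn(
-    batch_size=100, num_epochs=10, shuffle=True))
-estimator.evaluate(input_fn=get_test_input_fn(
-    batch_size=100, num_epochs=1, shuffle=False))
-```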
-
-### Preparing `FeatureColumns`
-
-TensorFlow provides `FeatureColumn`s as a way to select and describe the
-features used for a model. Numeric features require no transformations; for
-categorical features, we need to list their known valid values.
-
-See more details in the [TensorFlow Linear Model
-tutorial](https://www.tensorflow.org/tutorials/wide).
-
-TensorFlow Lattice pre-made estimators accept any of the currently supported
-`FeatureColumn`s, or alternatively the raw columns coming from the `input_fn`
-function, if they are already properly numeric.
-
-```python
-def create_feature_columns():
-  # Categorical features.
-  gender = tf.feature_column.categorical_column_with_vocabulary_list(
-      "gender", ["Female", "Male"])
-  education = tf.feature_column.categorical_column_with_vocabulary_list(
-      "education", [
-          "Bachelors", "HS-grad", "11th", "Masters", "9th", "Some-college",
-          "Assoc-acdm", "Assoc-voc", "7th-8th", "Doctorate", "Prof-school",
-          "5th-6th", "10th", "1st-4th", "Preschool", "12th"
-      ])
-  …
-  # Numerical (continuous) base columns.
-  age = tf.feature_column.numeric_column("age")
-  education_num = tf.feature_column.numeric_column("education_num")
-  capital_gain = tf.feature_column.numeric_column("capital_gain")
-  …
-  return [
-      age,
-      workclass,
-      education,
-      education_num,
-      marital_status,
-      occupation,
-      relationship,
-      race,
-      gender,
-      capital_gain,
-      capital_loss,
-      hours_per_week,
-      native_country,
-  ]
-```
-
-Note: unlike the DNN pre-made estimators
-([DNNClassifier](https://www.tensorflow.org/versions/r1.3/api_docs/python/tf/estimator/DNNClassifier)
-and
-[DNNRegressor](https://www.tensorflow.org/versions/r1.3/api_docs/python/tf/estimator/DNNRegressor)),
-TensorFlow Lattice pre-made estimators accept sparse `FeatureColumn`s without
-the need to embed them.
-
-### Calibration: Saving the Quantiles
-
-TensorFlow Lattice requires proper calibration of the input for its lattices
-(see the section on [calibration](#calibration) above).
-
-The current default calibration algorithm requires quantile information about
-the data on which the model is going to train. This can be generated as a
-simple pre-processing step.
-
-The following code snippet from our example does that:
-
-```python
-import tensorflow_lattice as tfl
-
-flags.DEFINE_bool("create_quantiles", False,
-                  "Run once to create histogram of features for calibration.")
-flags.DEFINE_string(
-    "quantiles_dir", None,
-    "Directory where to store quantile information, defaults to the model "
-    "directory (set by --output-dir) but since quantiles can be reused by "
-    "models with different parameters, you may want to have a separate "
-    "directory.")
-…
-
-def create_quantiles(quantiles_dir):
-  """Creates quantiles directory if it doesn't yet exist."""
-  batch_size = 10000
-  input_fn = get_test_input_fn(
-      batch_size=batch_size, num_epochs=1, shuffle=False)
-  # Reads until input is exhausted, 10000 at a time.
-  tfl.save_quantiles_for_keypoints(
-      input_fn=input_fn,
-      save_dir=quantiles_dir,
-      feature_columns=create_feature_columns(),
-      num_steps=None)
-
-def main(argv):
-  …
-
-  # Create quantiles if required.
-  if FLAGS.create_quantiles:
-    if FLAGS.run != "train":
-      raise ValueError(
-          "Can not create_quantiles for mode --run='{}'".format(FLAGS.run))
-    create_quantiles(quantiles_dir)
-```
-
-Note: This only needs to be run once per dataset, and the result can be shared
-among different models that use the same data.
-
-Note: This information is only needed for training. During inference
-(production), the model itself already contains all the information it needs,
-and doesn't need to read the quantiles anymore.
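-For example, assuming the flags defined in the snippets above, the
-pre-processing step could be invoked once like this (illustrative paths):
-
-```bash
-$ uci_census.py --run=train --create_quantiles \
-    --quantiles_dir=/tmp/uci_census/quantiles
-```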
-
-Advanced: if you know the range of an input, instead of using quantiles you
-can provide
-[`uniform_keypoints_for_signal`](../api_docs/python/tensorflow_lattice/uniform_keypoints_for_signal.md)
-as the keypoints initializer, which will create calibration keypoints
-uniformly in the given range. Or you can provide your own keypoint-initializing
-function.
-
-### Calibrated Linear Model
-
-The calibrated linear model is the simplest model type offered in TensorFlow
-Lattice. It calibrates the inputs using piecewise-linear functions and then
-combines them linearly. Using it is trivial if you are used to TensorFlow's
-`Estimator` framework (see [Module
-tf.estimator](https://www.tensorflow.org/api_docs/python/tf/estimator)).
-
-To create a calibrated linear model, you need to specify the features in
-`feature_columns`, the model directory in `model_dir`, the ["run
-configuration"](https://www.tensorflow.org/api_docs/python/tf/estimator/RunConfig)
-in `config`, and in `hparams` the hyperparameter settings, in the form of a
-[`tfl.CalibratedLinearHParams`](../api_docs/python/tensorflow_lattice/CalibratedLinearHParams.md)
-object.
-
-All parameters are optional; see more details in:
-
-* [`tfl.calibrated_linear_classifier`](../api_docs/python/tensorflow_lattice/calibrated_linear_classifier.md)
-* [`tfl.calibrated_linear_regressor`](../api_docs/python/tensorflow_lattice/calibrated_linear_regressor.md)
-* Configurable hyperparameters in
-  [`tfl.CalibratedLinearHParams`](../api_docs/python/tensorflow_lattice/CalibratedLinearHParams.md)
-
-Calibration can be forced to be monotonic and regularized in different ways.
-It also supports special-casing of __missing values__ (see the
-`missing_input_value` hyperparameter); that is, the calibration of missing
-values has its own parameter that is learned independently from other values.
-
-An example of the code that stitches this together is presented below. For
-now, we use only the default hyperparameters. The next section covers the
-special TensorFlow Lattice hyperparameters, and how to change them.
-
-The following code shows how our `create_calibrated_linear` function gets
-called. It hinges on creating an `Estimator` object, and then either training
-or evaluating it.
-
-```python
-import tensorflow_lattice as tfl
-
-def create_calibrated_linear(feature_columns, config, quantiles_dir):
-  feature_names = [fc.name for fc in feature_columns]
-  hparams = tfl.CalibratedLinearHParams(feature_names=feature_names)
-  return tfl.calibrated_linear_classifier(
-      feature_columns=feature_columns,
-      model_dir=config.model_dir,
-      config=config,
-      hparams=hparams,
-      quantiles_dir=quantiles_dir)
-…
-
-def create_estimator(config, quantiles_dir):
-  """Creates estimator for given configuration based on --model_type."""
-  feature_columns = create_feature_columns()
-  if FLAGS.model_type == "calibrated_linear":
-    return create_calibrated_linear(feature_columns, config, quantiles_dir)
-  elif FLAGS.model_type == "calibrated_lattice":
-    return create_calibrated_lattice(feature_columns, config, quantiles_dir)
-  elif FLAGS.model_type == "calibrated_rtl":
-    return create_calibrated_rtl(feature_columns, config, quantiles_dir)
-  elif FLAGS.model_type == "calibrated_etl":
-    return create_calibrated_etl(feature_columns, config, quantiles_dir)
-  elif FLAGS.model_type == "calibrated_dnn":
-    return create_calibrated_dnn(feature_columns, config, quantiles_dir)
-
-  raise ValueError("Unknown model_type={}".format(FLAGS.model_type))
-…
-
-def main(args):
-  …
-  # Create config and then model.
-  config = tf.estimator.RunConfig().replace(model_dir=output_dir)
-  estimator = create_estimator(config, quantiles_dir)
-
-  if FLAGS.run == "train":
-    train(estimator)
-
-  elif FLAGS.run == "evaluate":
-    evaluate(estimator)
-
-  else:
-    raise ValueError("Unknown --run={}".format(FLAGS.run))
-```
-
-### Setting Hyperparameters
-
-Each of the pre-made estimators offered by *TensorFlow Lattice* is controlled
-by a set of hyperparameters. Some are shared among different estimators, some
-are unique. All are documented in their definitions.
-
-* Calibrated linear models:
-  [`tfl.CalibratedLinearHParams`](../api_docs/python/tensorflow_lattice/CalibratedLinearHParams.md)
-* Calibrated lattice models:
-  [`tfl.CalibratedLatticeHParams`](../api_docs/python/tensorflow_lattice/CalibratedLatticeHParams.md)
-* Calibrated RTL models:
-  [`tfl.CalibratedRtlHParams`](../api_docs/python/tensorflow_lattice/CalibratedRtlHParams.md)
-* Calibrated ETL models:
-  [`tfl.CalibratedEtlHParams`](../api_docs/python/tensorflow_lattice/CalibratedEtlHParams.md)
-
-TensorFlow Lattice's hyperparameter classes are slightly different from the
-[TensorFlow standard hyperparameters
-class](https://www.tensorflow.org/api_docs/python/tf/contrib/training/HParams)
-in that they accept both global and per-feature parameters.
-For instance, in our calibrated linear model from the previous section, we
-define the following default values:
-
-```python
-def create_calibrated_linear(feature_columns, config, quantiles_dir):
-  feature_names = [fc.name for fc in feature_columns]
-  hparams = tfl.CalibratedLinearHParams(
-      feature_names=feature_names, num_keypoints=200, learning_rate=0.1)
-  hparams.set_feature_param("capital_gain", "calibration_l2_laplacian_reg",
-                            4.0e-8)
-  hparams.parse(FLAGS.hparams)
-  _pprint_hparams(hparams)
-  return tfl.calibrated_linear_classifier(
-      feature_columns=feature_columns,
-      model_dir=config.model_dir,
-      config=config,
-      hparams=hparams,
-      quantiles_dir=quantiles_dir)
-```
-
-The preceding code sets non-default values for the following hyperparameters:
-
-* `num_keypoints`
-* `learning_rate`
-* `calibration_l2_laplacian_reg` (L2 Laplacian regularization for the
-  calibration) for the feature named "capital_gain"
-
-Notice also the call `hparams.parse(FLAGS.hparams)`, which parses a string
-with a comma-separated list of settings. Feature-specific values can also be
-set by prefixing a parameter (one that takes feature-specific values) with
-"feature\_\_<_feature\_name_>\_\_<_param\_name_>". Notice that the separator
-here is a double underscore ("\_\_").
-
-For example, the following invocation sets `learning_rate=0.001`, and for the
-feature _capital\_loss_ it sets `calibration_l2_laplacian_reg=1.0e-5`:
-
-```bash
-$ uci_census.py … --hparams=learning_rate=0.001,feature__capital_loss__calibration_l2_laplacian_reg=1.0e-5 …
-```
-
-We define this simple pretty-print function in our example:
-
-```python
-import six
-
-def _pprint_hparams(hparams):
-  """Pretty-print hparams."""
-  print("* hparams=[")
-  for (key, value) in sorted(six.iteritems(hparams.values())):
-    print("\t{}={}".format(key, value))
-  print("]")
-```
-
-### Calibrated Lattice Model
-
-Calibrated lattice models first calibrate the input with piecewise-linear
-functions, and then combine the calibrated values in a lattice (see the
-[Concepts](#concepts) section).
-
-Calibrated lattice models also provide:
-
-* __Enforced monotonicity__: in the calibration (increasing or decreasing),
-  and in the lattice (you must also set the calibration to be monotonic and
-  enable lattice monotonicity). Both can be selected per feature.
-* __Missing value handling__: missing values can be automatically calibrated
-  to some special value, or can have their own vertex value in the lattice.
-  Controlled per feature through the parameters `missing_input_value` and
-  `missing_vertex`.
-* __Semantic regularization__: a rich set of regularizers that can be applied
-  independently to the calibration and the lattice, set globally or per
-  feature. See their descriptions in the [Concepts](#concepts) section.
-* Flexible size: the lattice can easily be adjusted to a different granularity
-  per feature by setting `lattice_size`. This gives it lots of power to
-  approximate any function.
-
-Limitations:
-
-* __Scalability issues with the number of features and lattice sizes__: the
-  total number of vertices (parameters) in the lattice is the product of the
-  `lattice_size` of each feature, so your models are gated by available memory
-  and parameter-update speed. To stay within reasonable bounds, don't use more
-  than 14 features (or 50,000 parameters). If that isn't possible, use the
-  more powerful [Random Tiny Lattices Model](#random-tiny-lattices-model)
-  or [Embedded Tiny Lattices Model](#embedded-tiny-lattices-model).
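-A quick back-of-the-envelope check of lattice size (a sketch, not part of the
-example code):
-
-```python
-def num_lattice_params(lattice_sizes):
-  """Vertices in one lattice: the product of per-feature lattice sizes."""
-  total = 1
-  for size in lattice_sizes:
-    total *= size
-  return total
-
-print(num_lattice_params([2] * 14))  # 16384: still fine
-print(num_lattice_params([3] * 14))  # 4782969: far beyond 50,000; use RTL/ETL
-```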
-
-Calibrated lattice models are available as a classifier or a regressor through
-the
-[`tfl.calibrated_lattice_classifier`](../api_docs/python/tensorflow_lattice/calibrated_lattice_classifier.md)
-and
-[`tfl.calibrated_lattice_regressor`](../api_docs/python/tensorflow_lattice/calibrated_lattice_regressor.md)
-constructors.
-
-Documentation on all hyperparameters is in
-[`tfl.CalibratedLatticeHParams`](../api_docs/python/tensorflow_lattice/CalibratedLatticeHParams.md).
-
-Extract from the
-[`uci_census`](https://github.com/tensorflow/lattice/blob/master/examples/uci_census.py)
-example:
-
-```python
-def create_calibrated_lattice(feature_columns, config, quantiles_dir):
-  feature_names = [fc.name for fc in feature_columns]
-  hparams = tfl.CalibratedLatticeHParams(
-      feature_names=feature_names,
-      num_keypoints=200,
-      lattice_l2_laplacian_reg=5.0e-3,
-      lattice_l2_torsion_reg=1.0e-4,
-      learning_rate=0.1,
-      lattice_size=2)
-  hparams.parse(FLAGS.hparams)
-  _pprint_hparams(hparams)
-  return tfl.calibrated_lattice_classifier(
-      feature_columns=feature_columns,
-      model_dir=config.model_dir,
-      config=config,
-      hparams=hparams,
-      quantiles_dir=quantiles_dir)
-```
-
-Note: To see how this function gets called from `main`, see [Calibrated Linear
-Model](#calibrated-linear-model).
-
-### Random Tiny Lattices Model
-
-Calibrated "Random Tiny Lattices" (RTL) models, like calibrated lattice
-models, first calibrate the input with piecewise-linear functions, but then
-combine the calibrated features in an ensemble of `num_lattices` lattices,
-each built with inputs from a random subset of the features (`lattice_rank`
-input features per lattice).
-
-Extract from the
-[`uci_census`](https://github.com/tensorflow/lattice/blob/master/examples/uci_census.py)
-example:
-
-```python
-def create_calibrated_rtl(feature_columns, config, quantiles_dir):
-  feature_names = [fc.name for fc in feature_columns]
-  hparams = tfl.CalibratedRtlHParams(
-      feature_names=feature_names,
-      num_keypoints=200,
-      learning_rate=0.02,
-      lattice_l2_laplacian_reg=5.0e-4,
-      lattice_l2_torsion_reg=1.0e-4,
-      lattice_size=3,
-      lattice_rank=4,
-      num_lattices=100)
-  # Specific feature parameters.
-  hparams.set_feature_param("capital_gain", "lattice_size", 8)
-  hparams.set_feature_param("native_country", "lattice_size", 8)
-  hparams.set_feature_param("marital_status", "lattice_size", 4)
-  hparams.set_feature_param("age", "lattice_size", 8)
-  hparams.parse(FLAGS.hparams)
-  _pprint_hparams(hparams)
-  return tfl.calibrated_rtl_classifier(
-      feature_columns=feature_columns,
-      model_dir=config.model_dir,
-      config=config,
-      hparams=hparams,
-      quantiles_dir=quantiles_dir)
-```
-
-Note: To see how this function gets called from `main`, see [Calibrated Linear
-Model](#calibrated-linear-model).
-
-In this example, the model calibrates the inputs (using up to 200 keypoints,
-per `num_keypoints`) and then randomly distributes them into 100 lattices
-(`num_lattices`); a feature can be used by more than one lattice.
-
-The lattices and the calibration are all trained jointly.
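-Roughly sizing this ensemble (a sketch; it ignores the per-feature
-`lattice_size` overrides set above):
-
-```python
-num_lattices, lattice_size, lattice_rank = 100, 3, 4
-
-# Each tiny lattice has lattice_size**lattice_rank vertices, so the ensemble
-# grows linearly with the number of lattices...
-print(num_lattices * lattice_size**lattice_rank)  # 8100 parameters
-
-# ...while a single lattice over all 13 census features would be unmanageable:
-print(3**13)  # 1594323 parameters
-```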
-Like with calibrated lattice models, but without the limitation on the number
-of features, it supports:
-
-* __Enforced monotonicity__: in the calibration (increasing or decreasing),
-  and in the lattices (you must also set the calibration to be monotonic and
-  enable lattice monotonicity). Both can be selected per feature.
-* __Missing value handling__: missing values can be automatically calibrated
-  to some special value, or can have their own vertex value in the lattices.
-  Controlled per feature through the parameters `missing_input_value` and
-  `missing_vertex`.
-* __Semantic regularization__: a rich set of regularizers that can be applied
-  independently to the calibration and the lattices, set globally or per
-  feature. See their descriptions in the [Concepts](#concepts) section.
-* Flexible size: lattices can easily be adjusted to a different granularity
-  per feature by setting `lattice_size`. This gives the model lots of power to
-  approximate any function.
-
-Note: The `lattice_rank` hyperparameter controls how many features are seen in
-_combination_. It is often used as a regularizer on the complexity of the
-interactions allowed among the features. But as with calibrated lattice
-models, at most 10 or 20 features can be combined in the same lattice. If you
-wonder whether the model could pick better-than-random features to combine in
-its lattices, check out the next section, on the [Embedded Tiny Lattices
-Model](#embedded-tiny-lattices-model).
-
-Calibrated RTL models are available as a classifier or a regressor through the
-[`tfl.calibrated_rtl_classifier`](../api_docs/python/tensorflow_lattice/calibrated_rtl_classifier.md)
-and
-[`tfl.calibrated_rtl_regressor`](../api_docs/python/tensorflow_lattice/calibrated_rtl_regressor.md)
-constructors.
-
-Documentation on all hyperparameters is in
-[`tfl.CalibratedRtlHParams`](../api_docs/python/tensorflow_lattice/CalibratedRtlHParams.md).
-
-### Embedded Tiny Lattices Model
-
-Calibrated "Embedded Tiny Lattices" (ETL) models, like calibrated [RTL
-models](#random-tiny-lattices-model), first calibrate the input and connect
-the calibrated signals to an ensemble of lattices. But as opposed to having
-each lattice take a subset of the calibrated features as input, in ETL models
-each lattice takes as input an embedding of the input features: each lattice
-input is a linear combination of the calibrated features.
-
-The number of lattices is defined by `monotonic_num_lattices` and
-`non_monotonic_num_lattices`: monotonic lattices can take both monotonic and
-non-monotonic features as input, while non-monotonic lattices can only take
-non-monotonic features as input (otherwise monotonicity could be broken).
-
-The size of the embedding used by each lattice is given by
-`monotonic_lattice_rank` and `non_monotonic_lattice_rank`. Each lattice has
-its own embedding: calibration, embeddings, and lattices are all trained
-jointly.
-
-The size of the lattices, which sets their resolution, is given by
-`monotonic_lattice_size` and `non_monotonic_lattice_size`.
-
-Calibrated ETL models are available as a classifier or a regressor through the
-[`tfl.calibrated_etl_classifier`](../api_docs/python/tensorflow_lattice/calibrated_etl_classifier.md)
-and
-[`tfl.calibrated_etl_regressor`](../api_docs/python/tensorflow_lattice/calibrated_etl_regressor.md)
-constructors.
-
-Embedded tiny lattices can be __more powerful__ than [RTL
-models](#random-tiny-lattices-model), but they __sacrifice some of the
-"semantic regularization"__ (the same regularization options are available,
-but they apply to abstract embeddings), and they are __slower to train__.
-__Monotonicity is still well supported__.
-
-See details in the paper [Deep Lattice Networks and Partial Monotonic
-Functions](https://research.google.com/pubs/pub46327.html). ETL implements
-only one-layer-deep lattice models, but deeper models can be built by
-composing lattice layers; see the [next section](#tensorflow-lattice-layers).
-
-In our example
-[`uci_census`](https://github.com/tensorflow/lattice/blob/master/examples/uci_census.py)
-model, using only non-monotonic signals:
-
-```python
-def create_calibrated_etl(feature_columns, config, quantiles_dir):
-  # No enforced monotonicity in this example.
-  feature_names = [fc.name for fc in feature_columns]
-  hparams = tfl.CalibratedEtlHParams(
-      feature_names=feature_names,
-      num_keypoints=200,
-      learning_rate=0.02,
-      non_monotonic_num_lattices=200,
-      non_monotonic_lattice_rank=2,
-      non_monotonic_lattice_size=2,
-      calibration_l2_laplacian_reg=4.0e-3,
-      lattice_l2_laplacian_reg=1.0e-5,
-      lattice_l2_torsion_reg=4.0e-4)
-  hparams.parse(FLAGS.hparams)
-  _pprint_hparams(hparams)
-  return tfl.calibrated_etl_classifier(
-      feature_columns=feature_columns,
-      model_dir=config.model_dir,
-      config=config,
-      hparams=hparams,
-      quantiles_dir=quantiles_dir)
-```
-
-Note: To see how this function gets called from `main`, see [Calibrated Linear
-Model](#calibrated-linear-model).
-
--------------------------------------------------------------------------------
-
-## TensorFlow Lattice Layers
-
-TensorFlow Lattice layer components are also provided by the library, so users
-can combine them in more flexible or advanced ways.
-
-The following are the layer components included in the TensorFlow Lattice
-library:
-
-* __Piecewise-Linear Calibration__:
-  * [`tfl.input_calibration_layer`](../api_docs/python/tensorflow_lattice/input_calibration_layer.md):
-    Calibrates the "input", provided either as `FeatureColumn`s or as a dict
-    of columns to tensors (the typical object returned by an `input_fn`
-    function). Includes support for monotonicity, regularization, and special
-    "missing" values.
-  * [`tfl.input_calibration_layer_from_hparams`](../api_docs/python/tensorflow_lattice/input_calibration_layer_from_hparams.md):
-    Same as the previous layer, but uses a `tfl.CalibratedHParams` object to
-    specify the hyperparameters.
-  * [`tfl.calibration_layer`](../api_docs/python/tensorflow_lattice/calibration_layer.md):
-    Calibrates a tensor of shape \[batch_size, ...\]. Each element (outside
-    the batch dimension) gets its own calibration. Includes support for
-    monotonicity, regularization, and special "missing" values.
-* __Lattice Layer__:
-  * [`tfl.lattice_layer`](../api_docs/python/tensorflow_lattice/lattice_layer.md):
-    Creates `output_dim` lattices that take as input a tensor of shape
-    \[batch_size, input_dim\]. The lattice size is defined per dimension of
-    `input_dim`. The total number of parameters is the product of all these
-    lattice sizes times `output_dim`. Full support for monotonicity and
-    regularization.
-  * [`tfl.ensemble_lattices_layer`](../api_docs/python/tensorflow_lattice/ensemble_lattices_layer.md):
-    Creates an ensemble of lattices connecting inputs as specified by the
-    caller. Full support for monotonicity and regularization.
-
-Example: *calibrated_dnn*, a custom estimator from our example
-[`uci_census`](https://github.com/tensorflow/lattice/blob/master/examples/uci_census.py)
-model:
-
-```python
-def create_calibrated_dnn(feature_columns, config, quantiles_dir):
-  """Creates a calibrated DNN model."""
-  # This is an example of a hybrid model that uses the input calibration layer
-  # offered by the TensorFlow Lattice library and connects it to a DNN.
-  feature_names = [fc.name for fc in feature_columns]
-  hparams = tfl.CalibratedHParams(
-      feature_names=feature_names,
-      num_keypoints=200,
-      learning_rate=1.0e-3,
-      calibration_output_min=-1.0,
-      calibration_output_max=1.0,
-      nodes_per_layer=10,  # All layers have the same number of nodes.
-      layers=2,  # Includes output layer, therefore >= 1.
-  )
-  hparams.parse(FLAGS.hparams)
-  _pprint_hparams(hparams)
-
-  def _model_fn(features, labels, mode, params):
-    """Model construction closure used when creating estimator."""
-    del params  # Hyperparameters are read from the bound variable hparams.
-
-    # Calibrate: since there is no monotonicity, there are no projection ops.
-    # We also discard the ordered names of the features.
-    (output, _, _, regularization) = tfl.input_calibration_layer_from_hparams(
-        features, feature_columns, hparams, quantiles_dir)
-
-    # Hidden layers.
-    for _ in range(hparams.layers - 1):
-      output = tf.layers.dense(
-          inputs=output, units=hparams.nodes_per_layer, activation=tf.sigmoid)
-
-    # Classifier logits and prediction.
-    logits = tf.layers.dense(inputs=output, units=1)
-    predictions = tf.reshape(tf.sigmoid(logits), [-1])
-
-    # Notice the loss doesn't include regularization, which is added
-    # separately by means of tf.contrib.layers.apply_regularization().
-    loss_no_regularization = tf.losses.log_loss(labels, predictions)
-    loss = loss_no_regularization
-    if regularization is not None:
-      loss += regularization
-    optimizer = tf.train.AdamOptimizer(learning_rate=hparams.learning_rate)
-    train_op = optimizer.minimize(
-        loss,
-        global_step=tf.train.get_global_step(),
-        name="calibrated_dnn_minimize")
-
-    eval_metric_ops = {
-        "accuracy": tf.metrics.accuracy(labels, predictions),
-
-        # We want to report the loss without the regularization, so the metric
-        # is comparable across different regularizations. FutureWork: list
-        # both.
-        "average_loss": tf.metrics.mean(loss_no_regularization),
-    }
-
-    return tf.estimator.EstimatorSpec(mode, predictions, loss, train_op,
-                                      eval_metric_ops)
-
-  # Hyperparameters are passed directly to the model_fn closure by the
-  # context.
-  return tf.estimator.Estimator(
-      model_fn=_model_fn,
-      model_dir=config.model_dir,
-      config=config,
-      params=None)
-```
-
-### Other potential use cases of these components
-
-* Integrating an embedding from another model (transfer learning).
-* Use TensorFlow Lattice's calibration in a DNN: it works much better than
-  Gaussian normalization of the inputs.
-* Use lattices on the "upper" (closer to the output) layers of a DNN, for
-  their regularization.
-* Use the piecewise-linear calibration as an activation function for neural
-  networks; this has been used with some success.
-* Use piecewise-linear calibration as a probability distribution function for
-  learning continuous values in a reinforcement learning setup (REINFORCE
-  algorithm).
-
-## Papers
-
-* [Lattice Regression](https://papers.nips.cc/paper/3694-lattice-regression),
-  Eric Garcia, Maya Gupta, Advances in Neural Information Processing Systems
-  (NIPS), 2009
-* [Optimized Regression for Efficient Function
-  Evaluation](http://ieeexplore.ieee.org/document/6203580/), Eric Garcia,
-  Raman Arora, Maya R. Gupta, IEEE Transactions on Image Processing, 2012
-* [Monotonic Calibrated Interpolated Look-Up
-  Tables](http://jmlr.org/papers/v17/15-243.html), Maya Gupta, Andrew Cotter,
-  Jan Pfeifer, Konstantin Voevodski, Kevin Canini, Alexander Mangylov,
-  Wojciech Moczydlowski, Alexander van Esbroeck, Journal of Machine Learning
-  Research (JMLR), 2016
-* [Fast and Flexible Monotonic Functions with Ensembles of
-  Lattices](https://papers.nips.cc/paper/6377-fast-and-flexible-monotonic-functions-with-ensembles-of-lattices),
-  Mahdi Milani Fard, Kevin Canini, Andrew Cotter, Jan Pfeifer, Maya Gupta,
-  Advances in Neural Information Processing Systems (NIPS), 2016
-* [Deep Lattice Networks and Partial Monotonic
-  Functions](https://research.google.com/pubs/pub46327.html), Seungil You,
-  Kevin Canini, David Ding, Jan Pfeifer, Maya R. Gupta, Advances in Neural
-  Information Processing Systems (NIPS), 2017
diff --git a/pip_pkg.sh b/pip_pkg.sh
deleted file mode 100755
index dc483a0..0000000
--- a/pip_pkg.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-set -e
-
-PLATFORM="$(uname -s | tr 'A-Z' 'a-z')"
-
-function main() {
-  if [ $# -lt 1 ] ; then
-    echo "No destination dir provided"
-    exit 1
-  fi
-
-  # Create the directory, then do dirname on a non-existent file inside it to
-  # give us an absolute paths with tilde characters resolved to the destination
-  # directory. Readlink -f is a cleaner way of doing this but is not available
-  # on a fresh macOS install.
-  mkdir -p "$1"
-  DEST="$(dirname "${1}/does_not_exist")"
-  echo "=== destination directory: ${DEST}"
-
-  TMPDIR=$(mktemp -d -t tmp.XXXXXXXXXX)
-
-  echo $(date) : "=== Using tmpdir: ${TMPDIR}"
-
-  echo "=== Copy TensorFlow Lattice files"
-  # Here are bazel-bin/pip_pkg.runfiles directory structure.
-  # bazel-bin/pip_pkg.runfiles
-  # |-
-  # |- org_python_pypi_backports_weakref
-  # |- org_tensorflow
-  # |- protobuf
-  # |- six_archive
-  # |- tensorflow_lattice
-  #    |- external
-  #    |- pip_pkg
-  #    |- pip_pkg.sh
-  #    |- MANIFEST.in (needed)
-  #    |- setup.py (needed)
-  #    |- tensorflow_lattice (needed)
-  #
-  # To build tensorflow lattice wheel, we only need setup.py, MANIFEST.in, and
-  # python and .so files under tensorflow_lattice/tensorflow_lattice.
-  # So we extract those to ${TMPDIR}.
- cp bazel-bin/pip_pkg.runfiles/tensorflow_lattice/setup.py "${TMPDIR}" - cp bazel-bin/pip_pkg.runfiles/tensorflow_lattice/MANIFEST.in "${TMPDIR}" - cp -R \ - bazel-bin/pip_pkg.runfiles/tensorflow_lattice/tensorflow_lattice \ - "${TMPDIR}" - - echo "=== Copy TensorFlow Lattice root and cc files" - cp README.md ${TMPDIR} - cp LICENSE ${TMPDIR} - cp -R \ - tensorflow_lattice/cc \ - "${TMPDIR}/tensorflow_lattice" - - pushd ${TMPDIR} - if [ "${TFL_SDIST}" = true ]; then - echo $(date) : "=== Building source distribution and wheel" - else - echo $(date) : "=== Building wheel" - fi - - if [ -z "$2" ]; then - if [ "${TFL_SDIST}" = true ]; then - python setup.py sdist > /dev/null - fi - python setup.py bdist_wheel > /dev/null - else - if [ "${TFL_SDIST}" = true ]; then - python setup.py "$2" sdist > /dev/null - fi - python setup.py "$2" bdist_wheel >/dev/null - fi - - cp dist/* "${DEST}" - popd - rm -rf ${TMPDIR} - echo $(date) : "=== Output tar ball and wheel file are in: ${DEST}" -} - -main "$@" diff --git a/setup.py b/setup.py index 3e95688..cf46697 100644 --- a/setup.py +++ b/setup.py @@ -1,114 +1,105 @@ -# pylint: disable=g-bad-file-header -# Copyright 2017 The TensorFlow Lattice Authors. +# Copyright 2018 The TensorFlow Lattice Authors. # -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================= -"""Setup for pip package.""" +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# ============================================================================== +"""Package setup script for TensorFlow Lattice library.""" + from __future__ import absolute_import from __future__ import division from __future__ import print_function +import datetime import sys -import warnings from setuptools import find_packages from setuptools import setup -from setuptools.command.install import install as InstallCommandBase -from setuptools.dist import Distribution - - -__version__ = '0.9.9' - - -REQUIRED_PACKAGES = [ - 'six >= 1.11.0', - 'protobuf >= 3.6.1', - 'numpy >= 1.14.5', -] +# This version number should always be that of the *next* (unreleased) version. +# Immediately after uploading a package to PyPI, you should increment the +# version number and push to gitHub. 
+__version__ = "2.0" -if '--gpu' in sys.argv: - use_gpu = True - sys.argv.remove('--gpu') +if "--release" in sys.argv: + sys.argv.remove("--release") + _name = "tensorflow_lattice" else: - use_gpu = False - - -if use_gpu: - project_name = 'tensorflow-lattice-gpu' - REQUIRED_PACKAGES.append('tensorflow-gpu==1.14.0') -else: - project_name = 'tensorflow-lattice' - REQUIRED_PACKAGES.append('tensorflow==1.14.0') - -CONSOLE_SCRIPTS = [ - 'freeze_graph_wrapper = ' - 'tensorflow_lattice.cc.tflite.freeze_graph_wrapper:main', - 'toco_wrapper = tensorflow_lattice.cc.tflite.toco_wrapper:main', + # Build a nightly package by default. + _name = "tensorflow_lattice_nightly" + __version__ += datetime.datetime.now().strftime(".dev%Y%m%d") + +_install_requires = [ + "absl-py", + "numpy", + "pandas", + "six", + "sklearn", + "matplotlib", + "graphviz", ] +# Part of the visualization code uses colabtools and IPython libraries. These +# are not added as hard requirements as they are mainly used in jupyter/colabs. + +_extras_require = { + "tensorflow": "tensorflow>=1.15", + "tensorflow-gpu": "tensorflow-gpu>=1.15", +} + +_classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Intended Audience :: Education", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + "Programming Language :: Python", + "Programming Language :: Python :: 2", + "Programming Language :: Python :: 3", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Scientific/Engineering :: Mathematics", + "Topic :: Software Development", + "Topic :: Software Development :: Libraries", + "Topic :: Software Development :: Libraries :: Python Modules", +] -class BinaryDistribution(Distribution): - """This class is needed in order to create OS specific wheels.""" - - def has_ext_modules(self): - return True - - -warnings.warn('tensorflow-lattice is likley to fail when building from a ' - 'source distribution (sdist). Please follow instructions in ' - '(https://github.com/tensorflow/lattice/INSTALL.md) ' - 'to build this from the source.') - +_description = ( + "A library that implements optionally monotonic lattice based models.") +_long_description = """\ +TensorFlow Lattice is a library that implements fast-to-evaluate and +interpretable (optionally monotonic) lattice based models, which are also known +as *interpolated look-up tables*. The library includes a collection of +Estimators, which operate like any TensorFlow Estimator. It also includes +Keras layers for lattices and feature calibration that can be composed +into custom models. +""" setup( - name=project_name, + name=_name, version=__version__, - description=('TensorFlow Lattice provides lattice models in TensorFlow'), - long_description='', - url='https://github.com/tensorflow/lattice', - author='Google Inc.', - author_email='tensorflow-lattice-releasing@google.com', - # Contained modules and scripts. + author="Google Inc.", + author_email="no-reply@google.com", + license="Apache 2.0", + classifiers=_classifiers, + install_requires=_install_requires, + extras_require=_extras_require, packages=find_packages(), - install_requires=REQUIRED_PACKAGES, - # Add in any packaged data. 
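For clarity, the release/nightly switch above can be read as the following standalone sketch; the package names and base version mirror the diff, while the printed output is only an example:

```python
import datetime
import sys

# Nightly builds get a dated ".devYYYYMMDD" suffix and a distinct package
# name; passing --release selects the stable name and plain version.
version = "2.0"
if "--release" in sys.argv:
    sys.argv.remove("--release")
    name = "tensorflow_lattice"
else:
    name = "tensorflow_lattice_nightly"
    version += datetime.datetime.now().strftime(".dev%Y%m%d")

print(name, version)  # e.g. "tensorflow_lattice_nightly 2.0.dev20200128"
```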
include_package_data=True, - package_data={'': ['*.so']}, - exclude_package_data={'': ['BUILD', '*.h', '*.cc']}, - zip_safe=False, - distclass=BinaryDistribution, - cmdclass={ - 'pip_pkg': InstallCommandBase, - }, - entry_points={ - 'console_scripts': CONSOLE_SCRIPTS - }, - # PyPI package information. - classifiers=[ - 'Development Status :: 4 - Beta', - 'Intended Audience :: Developers', - 'Intended Audience :: Education', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: Apache Software License', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3.4', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Topic :: Scientific/Engineering :: Mathematics', - 'Topic :: Software Development :: Libraries :: Python Modules', - 'Topic :: Software Development :: Libraries', - ], - license='Apache 2.0', - keywords='lattice tensorflow tensor machine learning', + description=_description, + long_description=_long_description, + long_description_content_type="text/markdown", + keywords="tensorflow lattice calibration machine learning", + url=( + "https://github.com/tensorflow/lattice" + ), ) diff --git a/tensorflow b/tensorflow deleted file mode 160000 index 456fbc0..0000000 --- a/tensorflow +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 456fbc0e498e3d10604973de9f46ca48d62267cc diff --git a/tensorflow_lattice/BUILD b/tensorflow_lattice/BUILD index 37af0c8..835d220 100644 --- a/tensorflow_lattice/BUILD +++ b/tensorflow_lattice/BUILD @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -licenses(["notice"]) # Apache 2.0 License package( default_visibility = [ @@ -20,38 +19,31 @@ package( ], ) +licenses(["notice"]) + exports_files(["LICENSE"]) py_library( name = "tensorflow_lattice", - srcs = ["__init__.py"], - srcs_version = "PY2AND3", - deps = [ - "//tensorflow_lattice/python:keypoints_initialization", - "//tensorflow_lattice/python:lattice_layers", - "//tensorflow_lattice/python:lattice_ops_py", - "//tensorflow_lattice/python:pwl_calibration_layers", - "//tensorflow_lattice/python:pwl_calibration_ops_py", - "//tensorflow_lattice/python:regularizers", - "//tensorflow_lattice/python:tools", - "//tensorflow_lattice/python/estimators:base", - "//tensorflow_lattice/python/estimators:calibrated", - "//tensorflow_lattice/python/estimators:calibrated_etl", - "//tensorflow_lattice/python/estimators:calibrated_lattice", - "//tensorflow_lattice/python/estimators:calibrated_linear", - "//tensorflow_lattice/python/estimators:calibrated_rtl", - "//tensorflow_lattice/python/estimators:hparams", - "//tensorflow_lattice/python/estimators:separately_calibrated_rtl", + srcs = [ + "__init__.py", + "layers/__init__.py", ], -) - -# Depend on this if you have a C++ library or binary that uses TensorFlow -# lattice ops. 
-cc_library( - name = "tensorflow_lattice_cc", + srcs_version = "PY2AND3", deps = [ - "//tensorflow_lattice/cc:lattice_ops", - "//tensorflow_lattice/cc:pwl_calibration_ops", + "//tensorflow_lattice/python:categorical_calibration_layer", + "//tensorflow_lattice/python:categorical_calibration_lib", + "//tensorflow_lattice/python:configs", + "//tensorflow_lattice/python:estimators", + "//tensorflow_lattice/python:lattice_layer", + "//tensorflow_lattice/python:lattice_lib", + "//tensorflow_lattice/python:linear_layer", + "//tensorflow_lattice/python:linear_lib", + "//tensorflow_lattice/python:model_info", + "//tensorflow_lattice/python:parallel_combination_layer", + "//tensorflow_lattice/python:pwl_calibration_layer", + "//tensorflow_lattice/python:pwl_calibration_lib", + "//tensorflow_lattice/python:test_utils", + "//tensorflow_lattice/python:visualization", ], - alwayslink = 1, ) diff --git a/tensorflow_lattice/__init__.py b/tensorflow_lattice/__init__.py index 03fcc52..2aeca52 100644 --- a/tensorflow_lattice/__init__.py +++ b/tensorflow_lattice/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2017 The TensorFlow Lattice Authors. +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,57 +11,27 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# ============================================================================== -"""Lattice modeling. - -This package provides functions and classes for lattice modeling. - -See full description in `README.md` file. +"""Tensorflow Lattice Library. - use them. +This package provides functions and classes for lattice modeling. """ -# pylint: disable=unused-import,wildcard-import, line-too-long - from __future__ import absolute_import -# Dependency imports - -# Import all modules here, but only import functions and classes that are -# more likely to be used directly by users. 
-from tensorflow_lattice.python.estimators.calibrated import input_calibration_layer_from_hparams -from tensorflow_lattice.python.estimators.calibrated_etl import calibrated_etl_classifier -from tensorflow_lattice.python.estimators.calibrated_etl import calibrated_etl_regressor -from tensorflow_lattice.python.estimators.calibrated_lattice import calibrated_lattice_classifier -from tensorflow_lattice.python.estimators.calibrated_lattice import calibrated_lattice_regressor -from tensorflow_lattice.python.estimators.calibrated_linear import calibrated_linear_classifier -from tensorflow_lattice.python.estimators.calibrated_linear import calibrated_linear_regressor -from tensorflow_lattice.python.estimators.calibrated_rtl import calibrated_rtl_classifier -from tensorflow_lattice.python.estimators.calibrated_rtl import calibrated_rtl_regressor -from tensorflow_lattice.python.estimators.hparams import CalibratedEtlHParams -from tensorflow_lattice.python.estimators.hparams import CalibratedHParams -from tensorflow_lattice.python.estimators.hparams import CalibratedLatticeHParams -from tensorflow_lattice.python.estimators.hparams import CalibratedLinearHParams -from tensorflow_lattice.python.estimators.hparams import CalibratedRtlHParams -from tensorflow_lattice.python.estimators.hparams import PerFeatureHParams -from tensorflow_lattice.python.estimators.separately_calibrated_rtl import separately_calibrated_rtl_classifier -from tensorflow_lattice.python.estimators.separately_calibrated_rtl import separately_calibrated_rtl_regressor -from tensorflow_lattice.python.lib.keypoints_initialization import load_keypoints_from_quantiles -from tensorflow_lattice.python.lib.keypoints_initialization import save_quantiles_for_keypoints -from tensorflow_lattice.python.lib.keypoints_initialization import save_quantiles_for_keypoints_once -from tensorflow_lattice.python.lib.keypoints_initialization import uniform_keypoints_for_signal -from tensorflow_lattice.python.lib.lattice_layers import ensemble_lattices_layer -from tensorflow_lattice.python.lib.lattice_layers import lattice_layer -from tensorflow_lattice.python.lib.lattice_layers import monotone_lattice -from tensorflow_lattice.python.lib.monotone_linear_layers import monotone_linear_layer -from tensorflow_lattice.python.lib.monotone_linear_layers import split_monotone_linear_layer -from tensorflow_lattice.python.lib.pwl_calibration_layers import calibration_layer -from tensorflow_lattice.python.lib.pwl_calibration_layers import input_calibration_layer -from tensorflow_lattice.python.lib.regularizers import calibrator_regularization -from tensorflow_lattice.python.lib.regularizers import lattice_regularization -from tensorflow_lattice.python.lib.tools import DEFAULT_NAME -from tensorflow_lattice.python.ops.gen_monotonic_projection import monotonic_projection -from tensorflow_lattice.python.ops.gen_pwl_indexing_calibrator import pwl_indexing_calibrator -from tensorflow_lattice.python.ops.lattice_ops import lattice -# pylint: enable=unused-import,wildcard-import,line-too-long +import tensorflow_lattice.layers + +from tensorflow_lattice.python import categorical_calibration_layer +from tensorflow_lattice.python import categorical_calibration_lib +from tensorflow_lattice.python import configs +from tensorflow_lattice.python import estimators +from tensorflow_lattice.python import lattice_layer +from tensorflow_lattice.python import lattice_lib +from tensorflow_lattice.python import linear_layer +from tensorflow_lattice.python import linear_lib +from 
tensorflow_lattice.python import model_info +from tensorflow_lattice.python import parallel_combination_layer +from tensorflow_lattice.python import pwl_calibration_layer +from tensorflow_lattice.python import pwl_calibration_lib +from tensorflow_lattice.python import test_utils +from tensorflow_lattice.python import visualization diff --git a/tensorflow_lattice/cc/BUILD b/tensorflow_lattice/cc/BUILD deleted file mode 100644 index d656718..0000000 --- a/tensorflow_lattice/cc/BUILD +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -licenses(["notice"]) # Apache 2.0 License - -package( - default_visibility = [ - "//tensorflow_lattice:__subpackages__", - ], -) - -exports_files(["LICENSE"]) - -load( - "//tensorflow_lattice:tensorflow_lattice.bzl", - "rpath_linkopts", -) -load( - "@org_tensorflow//tensorflow:tensorflow.bzl", - "tf_cc_test", - "tf_custom_op_library", - "tf_gen_op_libs", -) - -tf_custom_op_library( - name = "ops/_lattice_ops.so", - srcs = [ - ":ops/lattice_interpolation_ops.cc", - ":ops/monotone_lattice_ops.cc", - ], - linkopts = rpath_linkopts("ops/_lattice_ops.so"), - deps = [ - "//tensorflow_lattice/cc/kernels:lattice_kernels", - "//tensorflow_lattice/cc/lib:lattice_structure", - ], -) - -tf_custom_op_library( - name = "ops/_pwl_calibration_ops.so", - srcs = [ - ":ops/monotonic_projection_op.cc", - ":ops/pwl_indexing_calibrator_ops.cc", - ], - linkopts = rpath_linkopts("ops/_pwl_calibration_ops.so"), - deps = [ - "//tensorflow_lattice/cc/kernels:pwl_calibration_kernels", - ], -) - -cc_library( - name = "lattice_ops", - deps = [ - ":lattice_interpolation_ops_op_lib", - ":monotone_lattice_ops_op_lib", - ], - alwayslink = 1, -) - -cc_library( - name = "pwl_calibration_ops", - deps = [ - ":monotonic_projection_op_op_lib", - ":pwl_indexing_calibrator_ops_op_lib", - ], - alwayslink = 1, -) - -# Collection of operators. -tf_gen_op_libs( - op_lib_names = ["pwl_indexing_calibrator_ops"], - deps = [ - "//tensorflow_lattice/cc/kernels:pwl_indexing_calibrator_kernels", - "@org_tensorflow//tensorflow/core:lib", - ], -) - -tf_gen_op_libs( - op_lib_names = ["monotonic_projection_op"], - deps = [ - "//tensorflow_lattice/cc/kernels:monotonic_projection_kernel", - "@org_tensorflow//tensorflow/core:lib", - ], -) - -tf_gen_op_libs( - op_lib_names = ["lattice_interpolation_ops"], - deps = [ - "//tensorflow_lattice/cc/kernels:hypercube_interpolation_kernels", - "//tensorflow_lattice/cc/kernels:lattice_interpolation_base", - "//tensorflow_lattice/cc/kernels:simplex_interpolation_kernels", - "//tensorflow_lattice/cc/lib:lattice_structure", - "@org_tensorflow//tensorflow/core:lib", - ], -) - -tf_gen_op_libs( - op_lib_names = ["monotone_lattice_ops"], - deps = [ - "//tensorflow_lattice/cc/kernels:monotone_lattice_kernels", - "//tensorflow_lattice/cc/lib:lattice_structure", - "@org_tensorflow//tensorflow/core:lib", - ], -) - -# C++ tests. 
-cc_library( - name = "test_main", - testonly = 1, - srcs = ["test_tools/test_main.cc"], - deps = [ - "@org_tensorflow//tensorflow/core:test", - ], -) - -tf_cc_test( - name = "pwl_indexing_calibrator_ops_test", - size = "small", - srcs = ["ops/pwl_indexing_calibrator_ops_test.cc"], - linkopts = rpath_linkopts("pwl_indexing_calibrator_ops_test"), - deps = [ - ":pwl_indexing_calibrator_ops_op_lib", - ":test_main", - "@org_tensorflow//tensorflow/core:framework", - "@org_tensorflow//tensorflow/core:lib", - "@org_tensorflow//tensorflow/core:test", - "@org_tensorflow//tensorflow/core:testlib", - "@org_tensorflow//tensorflow/core/kernels:ops_testutil", - ], -) - -cc_library( - name = "hypercube_interpolation_ops_test_lib", - testonly = 1, - srcs = ["ops/hypercube_interpolation_ops_test_p.cc"], - hdrs = ["ops/hypercube_interpolation_ops_test.h"], - linkopts = rpath_linkopts("hypercube_interpolation_ops_test"), - deps = [ - ":lattice_interpolation_ops_op_lib", - ":test_main", - "@org_tensorflow//tensorflow/core:core_cpu", - "@org_tensorflow//tensorflow/core:framework", - "@org_tensorflow//tensorflow/core:lib", - "@org_tensorflow//tensorflow/core:protos_all_cc", - "@org_tensorflow//tensorflow/core:test", - "@org_tensorflow//tensorflow/core:testlib", - "@org_tensorflow//tensorflow/core/kernels:ops_testutil", - "@org_tensorflow//tensorflow/core/kernels:ops_util_hdrs", - ], -) - -tf_cc_test( - name = "hypercube_interpolation_ops_test", - size = "small", - srcs = ["ops/hypercube_interpolation_ops_test.cc"], - deps = [ - ":hypercube_interpolation_ops_test_lib", - "@org_tensorflow//tensorflow/core:framework", - "@org_tensorflow//tensorflow/core:test", - "@org_tensorflow//tensorflow/core:testlib", - ], -) - -tf_cc_test( - name = "simplex_interpolation_ops_test", - size = "small", - srcs = ["ops/simplex_interpolation_ops_test.cc"], - linkopts = rpath_linkopts("simplex_interpolation_ops_test"), - deps = [ - ":lattice_interpolation_ops_op_lib", - ":test_main", - "@org_tensorflow//tensorflow/core:framework", - "@org_tensorflow//tensorflow/core:lib", - "@org_tensorflow//tensorflow/core:test", - "@org_tensorflow//tensorflow/core:testlib", - "@org_tensorflow//tensorflow/core/kernels:ops_testutil", - ], -) - -tf_cc_test( - name = "monotonic_projection_op_test", - size = "small", - srcs = ["ops/monotonic_projection_op_test.cc"], - linkopts = rpath_linkopts("monotonic_projection_op_test"), - deps = [ - ":monotonic_projection_op_op_lib", - ":test_main", - "@org_tensorflow//tensorflow/core:framework", - "@org_tensorflow//tensorflow/core:lib", - "@org_tensorflow//tensorflow/core:test", - "@org_tensorflow//tensorflow/core:testlib", - "@org_tensorflow//tensorflow/core/kernels:ops_testutil", - ], -) diff --git a/tensorflow_lattice/cc/kernels/BUILD b/tensorflow_lattice/cc/kernels/BUILD deleted file mode 100644 index ca9b499..0000000 --- a/tensorflow_lattice/cc/kernels/BUILD +++ /dev/null @@ -1,166 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -licenses(["notice"]) # Apache 2.0 - -package( - default_visibility = [ - "//tensorflow_lattice:__subpackages__", - ], -) - -load( - "//tensorflow_lattice:tensorflow_lattice.bzl", - "rpath_linkopts", -) -load("@org_tensorflow//tensorflow:tensorflow.bzl", "tf_kernel_library") -load("@org_tensorflow//tensorflow:tensorflow.bzl", "tf_cc_test") - -# Piecewise-linear calibration kernels -cc_library( - name = "pwl_calibration_kernels", - deps = [ - ":monotonic_projection_kernel", - ":pwl_indexing_calibrator_kernels", - ], -) - -tf_kernel_library( - name = "pwl_indexing_calibrator_kernels", - srcs = ["pwl_indexing_calibrator_kernels.cc"], - deps = [ - "@org_tensorflow//tensorflow/core:framework_headers_lib", - "@org_tensorflow//tensorflow/core:framework_lite", - "@protobuf_archive//:protobuf", - ], -) - -tf_kernel_library( - name = "monotonic_projection_kernel", - srcs = ["monotonic_projection_kernel.cc"], - deps = [ - ":monotonic_projections", - "@org_tensorflow//tensorflow/core:framework_headers_lib", - "@protobuf_archive//:protobuf", - ], -) - -# Lattice interpolation kernels -cc_library( - name = "lattice_kernels", - deps = [ - ":hypercube_interpolation_kernels", - ":monotone_lattice_kernels", - ":simplex_interpolation_kernels", - ], -) - -cc_library( - name = "lattice_interpolation_base", - srcs = ["lattice_interpolation_base.cc"], - hdrs = ["lattice_interpolation_base.h"], - deps = [ - "//tensorflow_lattice/cc/lib:lattice_structure", - "@org_tensorflow//tensorflow/core:framework_headers_lib", - ], -) - -tf_kernel_library( - name = "hypercube_interpolation_kernels", - srcs = ["hypercube_interpolation_kernels.cc"], - deps = [ - ":lattice_interpolation_base", - "//tensorflow_lattice/cc/lib:lattice_structure", - "@org_tensorflow//tensorflow/core:framework_headers_lib", - ], - alwayslink = 1, -) - -tf_kernel_library( - name = "simplex_interpolation_kernels", - srcs = ["simplex_interpolation_kernels.cc"], - deps = [ - ":lattice_interpolation_base", - "//tensorflow_lattice/cc/lib:lattice_structure", - "@org_tensorflow//tensorflow/core:framework_headers_lib", - ], - alwayslink = 1, -) - -# Monotonic projections. 
-cc_library( - name = "monotonic_projections", - hdrs = ["monotonic_projections.h"], - deps = [ - "@org_tensorflow//tensorflow/core:framework_headers_lib", - "@org_tensorflow//tensorflow/core:framework_lite", - ], -) - -cc_library( - name = "lattice_raw_iterator", - srcs = ["lattice_raw_iterator.cc"], - hdrs = ["lattice_raw_iterator.h"], - deps = ["//tensorflow_lattice/cc/lib:lattice_structure"], -) - -tf_cc_test( - name = "lattice_raw_iterator_test", - srcs = ["lattice_raw_iterator_test.cc"], - linkopts = rpath_linkopts("lattice_raw_iterator_test"), - deps = [ - ":lattice_raw_iterator", - "//tensorflow_lattice/cc:test_main", - "//tensorflow_lattice/cc/lib:lattice_structure", - "@org_tensorflow//tensorflow/core:lib", - "@org_tensorflow//tensorflow/core:test", - ], -) - -cc_library( - name = "monotonic_lattice_projections", - hdrs = ["monotonic_lattice_projections.h"], - deps = [ - ":lattice_raw_iterator", - ":monotonic_projections", - "//tensorflow_lattice/cc/lib:lattice_structure", - "@org_tensorflow//tensorflow/core:framework_headers_lib", - ], -) - -tf_cc_test( - name = "monotonic_lattice_projections_test", - srcs = ["monotonic_lattice_projections_test.cc"], - linkopts = rpath_linkopts("monotonic_lattice_projections_test"), - deps = [ - ":monotonic_lattice_projections", - "//tensorflow_lattice/cc:test_main", - "//tensorflow_lattice/cc/lib:lattice_structure", - "@org_tensorflow//tensorflow/core:lib", - "@org_tensorflow//tensorflow/core:test", - ], -) - -# Monotone lattice kernels. -tf_kernel_library( - name = "monotone_lattice_kernels", - srcs = ["monotone_lattice_kernels.cc"], - deps = [ - ":lattice_interpolation_base", - ":monotonic_lattice_projections", - "//tensorflow_lattice/cc/lib:lattice_structure", - "@org_tensorflow//tensorflow/core:framework_headers_lib", - ], - alwayslink = 1, -) diff --git a/tensorflow_lattice/cc/kernels/hypercube_interpolation_kernels.cc b/tensorflow_lattice/cc/kernels/hypercube_interpolation_kernels.cc deleted file mode 100644 index 7de1672..0000000 --- a/tensorflow_lattice/cc/kernels/hypercube_interpolation_kernels.cc +++ /dev/null @@ -1,348 +0,0 @@ -/* Copyright 2017 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include -#include - -#include "tensorflow_lattice/cc/kernels/lattice_interpolation_base.h" -#include "tensorflow_lattice/cc/lib/lattice_structure.h" -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/tensor.h" -#include "tensorflow/core/platform/logging.h" - -namespace tensorflow { -namespace lattice { - -// HypercubeInterpolationOpKernel returns interpolation weights. 
-template -class HypercubeInterpolationOpKernel - : public LatticeInterpolationOpBase { - public: - explicit HypercubeInterpolationOpKernel(OpKernelConstruction* context) - : LatticeInterpolationOpBase(context) { - - constexpr int64 kBaseCost = 20; - constexpr int64 kCostPerCellVertex = 20; - constexpr int64 kWeightInitializationCost = 1; - this->SetCostPerExample( - kCostPerCellVertex * this->GetLatticeStructure().NumVerticesPerCell() + - kWeightInitializationCost * this->GetLatticeStructure().NumVertices() + - kBaseCost); - } - - private: - InterpolationWeights ComputeInterpolationWeights( - const LatticeStructure& lattice_structure, - typename TTypes::UnalignedConstFlat input_vector) const final; - - TF_DISALLOW_COPY_AND_ASSIGN(HypercubeInterpolationOpKernel); -}; - -// HypercubeGradientOpKernel returns gradient with respect to the -// input. -template -class HypercubeGradientOpKernel : public LatticeGradientOpBase { - public: - explicit HypercubeGradientOpKernel(OpKernelConstruction* context) - : LatticeGradientOpBase(context) { - - constexpr int64 kBaseCost = 20; - constexpr int64 kCostPerCellVertex = 20; - this->SetCostPerExample( - kCostPerCellVertex * this->GetLatticeStructure().Dimension() * - this->GetLatticeStructure().NumVerticesPerCell() + - kBaseCost); - } - - private: - std::vector ComputeGradWrtInput( - const LatticeStructure& lattice_structure, - typename TTypes::UnalignedConstFlat input_vector, - typename TTypes::UnalignedConstFlat weight_vector, - typename TTypes::UnalignedConstFlat grad_wrt_weight_vector) - const final; - - TF_DISALLOW_COPY_AND_ASSIGN(HypercubeGradientOpKernel); -}; - -// Produces linear interpolation weights for an input that is in the unit -// hypercube (the residual), as well as the corresponding indices in the lattice -// (based on the bottom_corner). Both the weights and the indices are computed -// during the same loop for efficiency, but we'll explain their computations -// separately. Also returns the residual vector from the bottom corner of the -// hypercube cell. See http://jmlr.org/papers/v17/15-243.html for more details. -// -// Calculating the linear interpolation weights -// -------------------------------------------- -// The linear interpolation weights on each vertex are the volumes of the -// hyperrectangles formed by partitioning the unit hypercube at the input. -// Example: 2D case - Draw a unit square. Draw an input x in the square. -// Draw horizontal and vertical lines through x. That forms 4 boxes - the -// volume of these boxes are the weights. Note the boxes are a partition of -// the unit square, so the sum of the areas (volumes) of the boxes sums to 1. -// The linear interpolation weight on each vertex is the volume of the box in -// the opposite corner (so that if you move x close to one corner, the weight -// on that corner grows). Mathematically in the 2D case (and generalizes -// directly to higher D) the weights are: -// weight([0, 0]) = (1 - input[0]) * (1 - input[1]) -// weight([0, 1]) = (1 - input[0]) * input[1] -// weight([1, 0]) = input[0] * (1 - input[1]) -// weight([1, 1]) = input[0] * input[1] -// -// Computing each of the 2^D weights directly using above formula would take -// O(2^D * D) operations. Instead we take advantage of the many repeated -// calculations to reduce this to a O(2^D) computation as follows: -// Let's start by initializing weight to 1 for every vertex. Lets consider -// current vertex. Suppose its bit representation is "0110". 
For every "0" we -// should multiply its weight by (1 - input[i]), where i is the sequence number -// of the corresponding bit. And for each "1" we should multiply its weight by -// input[i]. -// Let us iterate through all vertices in dfs (lexicographical) order. Let -// current_highest_dimension be the sequence number of the highest bit in the -// binary representation of the current vertex. At this moment we have multiplied -// the corresponding weights for all dimensions below current_highest_dimension. -// Now, let us update current_highest_dimension. -// Example: -// If "ii" is iterating on "??x010" (the location of memory where finally the -// weight for "00_1_010" will be stored), then we set the value for -// -// // Resetting bit x of ??x010. -// earlier_ii = ii ^ (1 << current_highest_dimension) -// // Now ii represents ?x1010. -// weight[ii] = weight[earlier_ii] * input[current_highest_dimension] -// // earlier_ii represents ?x0010. -// weight[earlier_ii] *= (1 - input[current_highest_dimension]) -// -// Example for 2x2 case: -// weight[0] is weight on [0,0] -// weight[1] is weight on [1,0] -// weight[2] is weight on [0,1] -// weight[3] is weight on [1,1] -// Initialization: weight[0] = 1, no other weight set. -// Loop: ii = 1. current_highest_dimension = 0 -// weight[1] = weight[0] * input[0]; -// weight[0] = weight[0] * (1 - input[0]) -// ii = 2. current_highest_dimension = 1. (highest bit of ii got index 1 at -// this step, so update current_highest_dimension to reflect this) -// weight[2] = weight[0] * input[1]; -// weight[0] = weight[0] * (1 - input[1]) -// ii = 3. current_highest_dimension = 1. -// weight[3] = weight[1] * input[1]; -// weight[1] = weight[1] * (1 - input[1]) -// -// Calculating the corresponding indices. Notice if the lattice sizes are larger -// than 2, the indices of the weights will be adjusted according to the -// LatticeStructure.strides. -// ------------------------------------- -// The lattice index for the iith vertex in the cell is the same as the index -// we computed for an earlier neighbor vertex, but offset by -// lattice_strides[(dimensions - 1) - current_highest_dimension]. -// Example: -// Suppose we have a 2x2 lattice. We should output vertices in the order: -// [0,0], [1,0], [0,1], [1,1]. -// Bottom corner is [0,0], so vertices[0] = 0 already set. -// let ii = 1. It corresponds to vertex [1,0]. current_highest_dimension = 0. -// lattice index of vertices[1] is different from lattice index of -// vertices[0] in dimension current_highest_dimension = 0 (counting from the -// end). -// vertices[1] = vertices[0] + lattice_strides[0] = [1, 0]; -// let ii = 2, it corresponds to [0,1]. current_highest_dimension becomes 1. -// vertices[2] = vertices[0] + lattice_strides[1] = [0, 1]; -// vertices[3] = vertices[1] + lattice_strides[1] = [1, 1]; -// - -template -InterpolationWeights -HypercubeInterpolationOpKernel::ComputeInterpolationWeights( - const LatticeStructure& lattice_structure, - typename TTypes::UnalignedConstFlat input) const { - const BottomCornerIndexAndResidual bottom_corner_index_and_residual = - lattice_structure.GetBottomCornerIndexAndResidual(input); - const std::vector& residual = - bottom_corner_index_and_residual.residual; - const int64 num_vertices_per_cell = lattice_structure.NumVerticesPerCell(); - // The interpolation weight contains up to num_vertices_per_cell non-zero elements.
- InterpolationWeights interpolation_weights; - std::vector& index = interpolation_weights.indices; - std::vector& weight = interpolation_weights.weights; - - index.resize(num_vertices_per_cell); - weight.resize(num_vertices_per_cell); - index[0] = bottom_corner_index_and_residual.bottom_corner_index; - weight[0] = 1.0; - - const int64 input_dim = lattice_structure.Dimension(); - const std::vector& strides = lattice_structure.Strides(); - - int64 current_highest_dimension = 0; - Dtype current_residual_value = residual[current_highest_dimension]; - for (int64 ii = 1; ii < num_vertices_per_cell; ++ii) { - // Make sure that we're within the bounds of the unit hypercube. - DCHECK_GE(current_residual_value, 0); - DCHECK_LE(current_residual_value, 1); - // Sanity check: current_highest_dimension has better respect the bounds. - DCHECK_GE(current_highest_dimension, 0); - DCHECK_LT(current_highest_dimension, input_dim); - - const int64 earlier_ii = ii ^ (1 << current_highest_dimension); - index[ii] = index[earlier_ii] + strides[current_highest_dimension]; - weight[ii] = weight[earlier_ii] * current_residual_value; - weight[earlier_ii] *= (1.0 - current_residual_value); - - if ((ii & (ii + 1)) == 0) { - // If ii + 1 is power of 2, then current_highest_dimension has changed, - // that means, that we are processing next dimension. - ++current_highest_dimension; - if (input_dim >= current_highest_dimension + 1) { - current_residual_value = residual[current_highest_dimension]; - } - } - } - return interpolation_weights; -} - -// The goal of the gradient op is, given grad_wrt_weight: -// (dy / dweight[0], dy / dweight[1], dy / dweight[2], dy / dweight[3]), -// to compute the grad_wrt_input: -// (dy / dx[0], ..., dy / dx[D-1]). -// -// We know that: -// dy/dx[jj] = sum_{ii \in weights} dy/dweight[ii] * dweight[ii]/dx[jj] -// -// For dweight[ii]/dx[jj], we use the following observation. -// For any 2 x ... x 2 lattices: -// weight[ii] + weight[jj] == constant. -// for all (ii, jj) pair such that ii ^ jj == 2 ** k and ii < jj. (This means -// ii's kth vertex is 0, and jj's kth vertex is 1, and other vertices are same.) -// Moreover, for such (ii, jj) pair, we have -// dweight[ii] / dx[k] == -(weight[ii] + weight[jj]) -// dweight[jj] / dx[k] == (weight[ii] + weight[jj]) -// -// To see this, let us consider 2 x 2 lattice case. -// -// Recall that -// weight[0] = (1 - x[0]) * (1 - x[1]) -// weight[1] = x[0] * (1 - x[1]) -// weight[2] = (1 - x[0]) * x[1] -// weight[3] = x[0] * x[1] -// -// Therefore, -// dweight[0] / dx[0] = -(1 - x[1]) == -(weight[0] + weight[1]) -// dweight[1] / dx[0] = (1 - x[1]) == (weight[0] + weight[1]) -// dweight[2] / dx[0] = -x[1] == -(weight[2] + weight[3]) -// dweight[3] / dx[0] = x[1] == (weight[2] + weight[3]), -// and -// dweight[0] / dx[1] = -(1 - x[0]) == -(weight[0] + weight[2]) -// dweight[1] / dx[1] = -x[0] == -(weight[1] + weight[3]) -// dweight[2] / dx[1] = (1 - x[0]) == (weight[0] + weight[2]) -// dweight[3] / dx[1] = x[0] == (weight[1] + weight[3]). -// -// So the summation part marginalize the dependency of x[k], and the sign is -// minus if the kth vertex is 0, and plus if the kth vertex is 1. -// The following code computes the gradient using the (ii, jj) pair by -// enumerating all indices whose kth vertex is 0. -// In order to support the multi-cell lattice, the code constructs a list -// (nnz_weight below) that maps the indices in the 2 x .... x 2 cell holding x -// into indices in the multi-cell. 
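As a cross-check on the weight computation described in the comments above, the same values can be obtained with the direct O(D * 2^D) product formula; this Python sketch is illustrative only (the kernel's in-place update produces identical weights in O(2^D)):

```python
import itertools

def hypercube_weights(residual):
    """Maps each 0/1 cell vertex to its linear interpolation weight.

    residual: the input's offset from the cell's bottom corner, one value
    in [0, 1] per dimension. Each weight is the volume of the sub-box
    opposite its vertex, and the weights sum to 1.
    """
    weights = {}
    for vertex in itertools.product((0, 1), repeat=len(residual)):
        w = 1.0
        for v, x in zip(vertex, residual):
            w *= x if v == 1 else 1.0 - x
        weights[vertex] = w
    return weights

w = hypercube_weights([0.25, 0.5])
# w == {(0, 0): 0.375, (0, 1): 0.375, (1, 0): 0.125, (1, 1): 0.125}
```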
-// -// Including this construction, the overall complexity is -// O((input_dim + 2) * 2 ** (input_dim - 1)). -// -// Also when x[jj] < 0 or x[jj] > lattice_size[jj], the input is out of bound. -// So the change in the input should not change the output, therefore the -// gradient should be zero. -// - -template -std::vector HypercubeGradientOpKernel::ComputeGradWrtInput( - const LatticeStructure& lattice_structure, - typename TTypes::UnalignedConstFlat input, - typename TTypes::UnalignedConstFlat weight, - typename TTypes::UnalignedConstFlat grad_wrt_weight) const { - const BottomCornerIndexAndResidual bottom_corner_index_and_residual = - lattice_structure.GetBottomCornerIndexAndResidual(input); - const int64 input_dim = lattice_structure.Dimension(); - std::vector grad_wrt_input(input_dim, 0.0); - - // There are at most 2 ** n number of non-zero elements in weight. - // nnz_weight_index keeps the index of non-zero element in the weight. - // The following loop enumerats all vertices in cell in the following order. - // [0, 0, ..., 0], [1, 0, ...,0], [0, 1, ..., 0], ..., [1, 1, ..., 1]. - std::vector nnz_weight_index(lattice_structure.NumVerticesPerCell()); - - int64 current_dim = 0; - int64 current_bit = 1; // Always 1 << current_dim; - nnz_weight_index[0] = bottom_corner_index_and_residual.bottom_corner_index; - const std::vector& strides = lattice_structure.Strides(); - for (int64 ii = 1; ii < nnz_weight_index.size(); ++ii) { - if ((ii & current_bit) == 0) { - ++current_dim; - current_bit <<= 1; - } - // ii - current_bit is the base. - // ii is the current one, which is always an upper layer in the current - // dimension. - nnz_weight_index[ii] = - nnz_weight_index[ii - current_bit] + strides[current_dim]; - } - - // Compute the gradient for each input. - for (int64 ii = 0; ii < input_dim; ++ii) { - // If out_of_bound, gradient is 0. - if (bottom_corner_index_and_residual.out_of_bound[ii]) { - continue; - } - // Only process the bottom faces. - int64 bit = 1 << ii; - int64 stride = strides[ii]; - Dtype grad_ii = 0.0; - for (int64 index = 0; index < lattice_structure.NumVerticesPerCell(); - ++index) { - // Upper face. Skip this index. - if (index & bit) { - continue; - } - // Bottom face. - int64 lower_index = nnz_weight_index[index]; - int64 upper_index = lower_index + stride; - grad_ii += (weight(lower_index) + weight(upper_index)) * - (grad_wrt_weight(upper_index) - grad_wrt_weight(lower_index)); - } - grad_wrt_input[ii] = grad_ii; - } - - return grad_wrt_input; -} -// Register kernels for float and double. -REGISTER_KERNEL_BUILDER(Name("HypercubeInterpolation") - .Device(DEVICE_CPU) - .TypeConstraint("Dtype"), - HypercubeInterpolationOpKernel); - -REGISTER_KERNEL_BUILDER(Name("HypercubeInterpolation") - .Device(DEVICE_CPU) - .TypeConstraint("Dtype"), - HypercubeInterpolationOpKernel); - -REGISTER_KERNEL_BUILDER( - Name("HypercubeGradient").Device(DEVICE_CPU).TypeConstraint("Dtype"), - HypercubeGradientOpKernel); - -REGISTER_KERNEL_BUILDER(Name("HypercubeGradient") - .Device(DEVICE_CPU) - .TypeConstraint("Dtype"), - HypercubeGradientOpKernel); - -} // namespace lattice -} // namespace tensorflow diff --git a/tensorflow_lattice/cc/kernels/lattice_interpolation_base.cc b/tensorflow_lattice/cc/kernels/lattice_interpolation_base.cc deleted file mode 100644 index aceedb9..0000000 --- a/tensorflow_lattice/cc/kernels/lattice_interpolation_base.cc +++ /dev/null @@ -1,54 +0,0 @@ -/* Copyright 2017 The TensorFlow Lattice Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow_lattice/cc/kernels/lattice_interpolation_base.h" - -#include - -#include "tensorflow_lattice/cc/lib/lattice_structure.h" -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/lib/strings/str_util.h" - -namespace tensorflow { -namespace lattice { - -using errors::InvalidArgument; -using str_util::Join; - -LatticeOpBase::LatticeOpBase(OpKernelConstruction* context) - : OpKernel(context), cost_per_example_(1.0) { - std::vector lattice_sizes; - OP_REQUIRES_OK(context, context->GetAttr("lattice_sizes", &lattice_sizes)); - OP_REQUIRES(context, LatticeStructure::IsValidLatticeSizes(lattice_sizes), - InvalidArgument(Join(lattice_sizes, ","), - " is not a valid lattice size")); - lattice_structure_ = - std::unique_ptr(new LatticeStructure(lattice_sizes)); -} - -void LatticeOpBase::CheckShape(OpKernelContext* context, const Tensor& tensor, - const std::vector& expected_shape) const { - OP_REQUIRES(context, tensor.dims() == expected_shape.size(), - InvalidArgument("expect rank ", expected_shape.size(), "but got ", - tensor.DebugString())); - - for (int ii = 0; ii < expected_shape.size(); ++ii) { - OP_REQUIRES(context, tensor.dim_size(ii) == expected_shape[ii], - InvalidArgument("expect ", ii, "-dim: ", expected_shape[ii], - "but got ", tensor.DebugString())); - } -} - -} // namespace lattice -} // namespace tensorflow diff --git a/tensorflow_lattice/cc/kernels/lattice_interpolation_base.h b/tensorflow_lattice/cc/kernels/lattice_interpolation_base.h deleted file mode 100644 index 19456bb..0000000 --- a/tensorflow_lattice/cc/kernels/lattice_interpolation_base.h +++ /dev/null @@ -1,233 +0,0 @@ -/* Copyright 2017 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -// Lattice interpolation base class. 
-#ifndef TENSORFLOW_LATTICE_CC_KERNELS_LATTICE_INTERPOLATION_BASE_H_ -#define TENSORFLOW_LATTICE_CC_KERNELS_LATTICE_INTERPOLATION_BASE_H_ - -#include -#include -#include - -#include "tensorflow_lattice/cc/lib/lattice_structure.h" -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/tensor.h" -#include "tensorflow/core/util/work_sharder.h" - -namespace tensorflow { -namespace lattice { - -template -struct InterpolationWeights { - std::vector indices; - std::vector weights; -}; - -// LatticeOpBase class contains common part of all lattice operators as lattice -// structure initialization. -class LatticeOpBase : public OpKernel { - public: - explicit LatticeOpBase(OpKernelConstruction* context); - - // Returns the lattice_structure. - const LatticeStructure& GetLatticeStructure() const { - return *lattice_structure_; - } - - // Check whether the shape of tensor is same with expected_shape. - void CheckShape(OpKernelContext* context, const Tensor& tensor, - const std::vector& expected_shape) const; - - // Cost per example. - const int64 CostPerExample() const { return cost_per_example_; } - void SetCostPerExample(const int64 cost_per_example) { - cost_per_example_ = cost_per_example; - } - - private: - std::unique_ptr lattice_structure_; - int64 cost_per_example_; -}; - -// LatticeInterpolationOpBase is a base class for -// HypercubeInterpolationOpKernel and SimplexInterpolationOpKernel. -// The InterpolationWeights computation should be implemented in -// ComputeInterpolationWeights method. -template -class LatticeInterpolationOpBase : public LatticeOpBase { - public: - explicit LatticeInterpolationOpBase(OpKernelConstruction* context) - : LatticeOpBase(context) {} - - void Compute(OpKernelContext* context) override; - - protected: - virtual InterpolationWeights ComputeInterpolationWeights( - const LatticeStructure& lattice_structure, - typename TTypes::UnalignedConstFlat input_vector) const = 0; - - private: - // Apply InterpolationWeights to each slice of tensors. - void BatchInterpolationWorker(const Tensor& input_tensor, const int start, - const int limit, - Tensor* interpolation_weights_tensor) const; -}; - -template -void LatticeInterpolationOpBase::BatchInterpolationWorker( - const Tensor& input_tensor, const int start, const int limit, - Tensor* interpolation_weights_tensor) const { - for (int ii = start; ii < limit; ++ii) { - // Get iith input vector. - const auto input_row_ii = input_tensor.Slice(ii, ii + 1); - - // Compute weight-index pairs. - const InterpolationWeights interpolation_weights = - ComputeInterpolationWeights(GetLatticeStructure(), - input_row_ii.unaligned_flat()); - - // Get iith interpolation weight vector (output). - auto interpolation_weights_row_ii = - interpolation_weights_tensor->Slice(ii, ii + 1).unaligned_flat(); - - // Assign values to interpolation weight vector. - interpolation_weights_row_ii.setZero(); - DCHECK_EQ(interpolation_weights.indices.size(), - interpolation_weights.weights.size()); - for (int jj = 0; jj < interpolation_weights.indices.size(); ++jj) { - interpolation_weights_row_ii(interpolation_weights.indices[jj]) = - interpolation_weights.weights[jj]; - } - } -} - -template -void LatticeInterpolationOpBase::Compute(OpKernelContext* context) { - const LatticeStructure& lattice_structure = GetLatticeStructure(); - // Grab the input tensor. - const Tensor& input_tensor = context->input(0); - // Check the shapes. 
- const int64 batch_dim = input_tensor.dim_size(0); - const int64 input_dim = lattice_structure.Dimension(); - CheckShape(context, input_tensor, {batch_dim, input_dim}); - - // Allocate interpolation_weights_tensor. - Tensor* interpolation_weights_tensor = nullptr; - OP_REQUIRES_OK( - context, - context->allocate_output( - 0, TensorShape({batch_dim, lattice_structure.NumVertices()}), - &interpolation_weights_tensor)); - - auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); - - // Launch threads. - Shard(worker_threads.num_threads, worker_threads.workers, batch_dim, - CostPerExample(), [&](int start, int limit) { - BatchInterpolationWorker(input_tensor, start, limit, - interpolation_weights_tensor); - }); -} - -// LatticeGradientOpBase is a base class for HypercubeGradientOpKernel and -// SimplexGradientOpKernel. -// The gradient with respect to the input should be implemented in the -// ComputeGradWrtInput method. -template -class LatticeGradientOpBase : public LatticeOpBase { - public: - explicit LatticeGradientOpBase(OpKernelConstruction* context) - : LatticeOpBase(context) {} - - void Compute(OpKernelContext* context) override; - - protected: - virtual std::vector ComputeGradWrtInput( - const LatticeStructure& lattice_structure, - typename TTypes::UnalignedConstFlat input_vector, - typename TTypes::UnalignedConstFlat weight_vector, - typename TTypes::UnalignedConstFlat grad_wrt_weight_vector) - const = 0; - - private: - // Apply ComputeGradWrtInput to each slice of tensors. - void BatchGradientWorker(const Tensor& input_tensor, - const Tensor& weight_tensor, - const Tensor& grad_wrt_weight_tensor, - const int start, const int limit, - Tensor* grad_wrt_input_tensor) const; -}; - -// BatchGradientWorker computes the gradient with respect to the input of each -// row. -template -void LatticeGradientOpBase::BatchGradientWorker( - const Tensor& input_tensor, const Tensor& weight_tensor, - const Tensor& grad_wrt_weight_tensor, const int start, const int limit, - Tensor* grad_wrt_input_tensor) const { - auto grad_wrt_input_matrix = grad_wrt_input_tensor->matrix(); - for (int ii = start; ii < limit; ++ii) { - const auto input_row_ii = input_tensor.Slice(ii, ii + 1); - const auto weight_row_ii = weight_tensor.Slice(ii, ii + 1); - const auto grad_wrt_weight_row_ii = - grad_wrt_weight_tensor.Slice(ii, ii + 1); - - const std::vector grad_wrt_input = ComputeGradWrtInput( - GetLatticeStructure(), input_row_ii.unaligned_flat(), - weight_row_ii.unaligned_flat(), - grad_wrt_weight_row_ii.unaligned_flat()); - - for (int jj = 0; jj < grad_wrt_input.size(); ++jj) { - grad_wrt_input_matrix(ii, jj) = grad_wrt_input[jj]; - } - } -} - -template -void LatticeGradientOpBase::Compute(OpKernelContext* context) { - const LatticeStructure& lattice_structure = this->GetLatticeStructure(); - const Tensor& input_tensor = context->input(0); - const Tensor& weight_tensor = context->input(1); - const Tensor& grad_wrt_weight_tensor = context->input(2); - // Check the shapes. - const int64 batch_dim = input_tensor.dim_size(0); - const int64 input_dim = lattice_structure.Dimension(); - CheckShape(context, input_tensor, {batch_dim, input_dim}); - CheckShape(context, weight_tensor, - {batch_dim, lattice_structure.NumVertices()}); - CheckShape(context, grad_wrt_weight_tensor, - {batch_dim, lattice_structure.NumVertices()}); - - // Dense implementation.
- Tensor* grad_wrt_input_tensor = nullptr; - OP_REQUIRES_OK( - context, - context->allocate_output(0, TensorShape({batch_dim, input_dim}), - &grad_wrt_input_tensor)); - - auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); - - // Launch threads. - Shard(worker_threads.num_threads, worker_threads.workers, batch_dim, - CostPerExample(), [&](int start, int limit) { - BatchGradientWorker(input_tensor, weight_tensor, - grad_wrt_weight_tensor, start, limit, - grad_wrt_input_tensor); - }); -} - -} // namespace lattice -} // namespace tensorflow - -#endif // TENSORFLOW_LATTICE_CC_KERNELS_LATTICE_INTERPOLATION_BASE_H_ diff --git a/tensorflow_lattice/cc/kernels/lattice_raw_iterator.cc b/tensorflow_lattice/cc/kernels/lattice_raw_iterator.cc deleted file mode 100644 index 101dfc1..0000000 --- a/tensorflow_lattice/cc/kernels/lattice_raw_iterator.cc +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright 2017 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow_lattice/cc/kernels/lattice_raw_iterator.h" - -#include - -#include "tensorflow_lattice/cc/lib/lattice_structure.h" - -namespace tensorflow { -namespace lattice { - -void LatticeRawIterator::Next() { - ++index_; - for (int64 dim = 0; dim < lattice_sizes_.size(); ++dim) { - ++vertex_[dim]; - if (vertex_[dim] == lattice_sizes_[dim]) { - vertex_[dim] = 0; - } else { - break; - } - } -} - -} // namespace lattice -} // namespace tensorflow diff --git a/tensorflow_lattice/cc/kernels/lattice_raw_iterator.h b/tensorflow_lattice/cc/kernels/lattice_raw_iterator.h deleted file mode 100644 index 68e9fb9..0000000 --- a/tensorflow_lattice/cc/kernels/lattice_raw_iterator.h +++ /dev/null @@ -1,63 +0,0 @@ -/* Copyright 2017 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -// LatticeRawIterator iterates all vertices in a multi-cell lattice in the -// column-major order. Note that this indexing (column-major order) should be -// consistent with LatticeStructure. 
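For reference, the column-major visiting order described here is easy to reproduce outside the library; this Python sketch (illustrative only, not part of the codebase) yields the same (index, vertex) pairs as the iterator, with the first dimension varying fastest:

```python
import itertools

def lattice_vertices(lattice_sizes):
    """Yields (index, vertex) pairs in column-major order: the first
    dimension varies fastest, matching LatticeStructure's indexing."""
    # itertools.product varies its last factor fastest, so feed it the
    # reversed sizes and flip each emitted vertex back.
    ranges = [range(size) for size in reversed(lattice_sizes)]
    for index, reversed_vertex in enumerate(itertools.product(*ranges)):
        yield index, tuple(reversed(reversed_vertex))

# For a 2x3 lattice this yields:
# (0, (0, 0)), (1, (1, 0)), (2, (0, 1)), (3, (1, 1)), (4, (0, 2)), (5, (1, 2))
```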
-// -// Iteration example: -// for (LatticeRawIterator iter(lattice_structure) ; !iter.IsDone(); -// iter.Next()) { -// const int64 global_index = iter.Index(); -// const int64 vertex_first_dim = iter.VertexDim(0); -// const int64 vertex_second_dim = iter.VertexDim(1); -// } - -#ifndef TENSORFLOW_LATTICE_CC_KERNELS_LATTICE_RAW_ITERATOR_H_ -#define TENSORFLOW_LATTICE_CC_KERNELS_LATTICE_RAW_ITERATOR_H_ - -#include -#include - -#include "tensorflow_lattice/cc/lib/lattice_structure.h" - -namespace tensorflow { -namespace lattice { - -class LatticeRawIterator { - public: - explicit LatticeRawIterator(const LatticeStructure& lattice_structure) - : lattice_sizes_(lattice_structure.LatticeSizes()), - vertex_(lattice_structure.Dimension(), 0), - index_(0), - last_index_(lattice_structure.NumVertices()) {} - - // Forwards the iterator. - void Next(); - - bool IsDone() const { return index_ >= last_index_; } - int64 Index() const { return index_; } - const std::vector& Vertex() const { return vertex_; } - int64 VertexDim(const int64 dim) const { return vertex_[dim]; } - - private: - const std::vector lattice_sizes_; - std::vector vertex_; - int64 index_; - const int64 last_index_; -}; - -} // namespace lattice -} // namespace tensorflow -#endif // TENSORFLOW_LATTICE_CC_KERNELS_LATTICE_RAW_ITERATOR_H_ diff --git a/tensorflow_lattice/cc/kernels/lattice_raw_iterator_test.cc b/tensorflow_lattice/cc/kernels/lattice_raw_iterator_test.cc deleted file mode 100644 index 144825c..0000000 --- a/tensorflow_lattice/cc/kernels/lattice_raw_iterator_test.cc +++ /dev/null @@ -1,111 +0,0 @@ -/* Copyright 2017 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow_lattice/cc/kernels/lattice_raw_iterator.h" - -#include - -#include "tensorflow_lattice/cc/lib/lattice_structure.h" -#include "tensorflow/core/lib/strings/str_util.h" -#include "tensorflow/core/platform/logging.h" -#include "tensorflow/core/platform/test.h" - -namespace tensorflow { -namespace lattice { - -namespace { -struct IndexVertexPair { - int64 index; - std::vector vertex; -}; -} // namespace - -// The fixture for testing LatticeRawIteration. -class LatticeRawIteratorTest : public ::testing::Test { - protected: - // Given the lattice sizes, iterate using RawIterator and check whether the - // iterator visits all expected index_vertex_pairs. - void CheckFullIteration( - const std::vector& lattice_sizes, - const std::vector& expected_index_vertex_pairs) { - LatticeStructure lattice_structure(lattice_sizes); - - // Iterate and collect indices and vertices. - std::vector visited_index_vertex_pairs; - for (LatticeRawIterator iter(lattice_structure); !iter.IsDone(); - iter.Next()) { - visited_index_vertex_pairs.push_back( - IndexVertexPair{iter.Index(), iter.Vertex()}); - LOG(INFO) << "visited_index : " << iter.Index() << " visited_vertex: [" - << str_util::Join(iter.Vertex(), ",") << "]"; - } - - // Check the result with the expected results. 
-    CompareIndexVertexPairs(expected_index_vertex_pairs,
-                            visited_index_vertex_pairs);
-  }
-
- private:
-  void CompareIndexVertexPairs(
-      const std::vector<IndexVertexPair>& index_vertex_pairs1,
-      const std::vector<IndexVertexPair>& index_vertex_pairs2) {
-    ASSERT_EQ(index_vertex_pairs1.size(), index_vertex_pairs2.size());
-    const int num_pairs = index_vertex_pairs1.size();
-    std::vector<bool> visited(num_pairs, false);
-    // O(n^2) comparison.
-    for (const auto& index_vertex_pair2 : index_vertex_pairs2) {
-      for (int ii = 0; ii < num_pairs; ++ii) {
-        if (index_vertex_pair2.index == index_vertex_pairs1[ii].index &&
-            index_vertex_pair2.vertex == index_vertex_pairs1[ii].vertex) {
-          visited[ii] = true;
-          break;
-        }
-      }
-    }
-    // Now check that we visited every index_vertex_pair in
-    // index_vertex_pairs1.
-    for (const bool is_visited : visited) {
-      EXPECT_TRUE(is_visited);
-    }
-  }
-};
-
-TEST_F(LatticeRawIteratorTest, FullIterationWithTwoByThree) {
-  CheckFullIteration(
-      /*lattice_sizes=*/{2, 3}, /*expected_index_vertex_pairs=*/{{0, {0, 0}},
-                                                                 {1, {1, 0}},
-                                                                 {2, {0, 1}},
-                                                                 {3, {1, 1}},
-                                                                 {4, {0, 2}},
-                                                                 {5, {1, 2}}});
-}
-
-TEST_F(LatticeRawIteratorTest, FullIterationWithThreeByTwoByTwo) {
-  CheckFullIteration(
-      /*lattice_sizes=*/{3, 2, 2},
-      /*expected_index_vertex_pairs=*/{{0, {0, 0, 0}},
-                                       {1, {1, 0, 0}},
-                                       {2, {2, 0, 0}},
-                                       {3, {0, 1, 0}},
-                                       {4, {1, 1, 0}},
-                                       {5, {2, 1, 0}},
-                                       {6, {0, 0, 1}},
-                                       {7, {1, 0, 1}},
-                                       {8, {2, 0, 1}},
-                                       {9, {0, 1, 1}},
-                                       {10, {1, 1, 1}},
-                                       {11, {2, 1, 1}}});
-}
-
-}  // namespace lattice
-}  // namespace tensorflow
diff --git a/tensorflow_lattice/cc/kernels/monotone_lattice_kernels.cc b/tensorflow_lattice/cc/kernels/monotone_lattice_kernels.cc
deleted file mode 100644
index 6ff06ea..0000000
--- a/tensorflow_lattice/cc/kernels/monotone_lattice_kernels.cc
+++ /dev/null
@@ -1,184 +0,0 @@
-/* Copyright 2017 The TensorFlow Lattice Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#include <memory>
-#include <vector>
-
-#include "tensorflow_lattice/cc/kernels/lattice_interpolation_base.h"
-#include "tensorflow_lattice/cc/kernels/monotonic_lattice_projections.h"
-#include "tensorflow_lattice/cc/lib/lattice_structure.h"
-#include "tensorflow/core/framework/op_kernel.h"
-#include "tensorflow/core/framework/tensor.h"
-#include "tensorflow/core/util/work_sharder.h"
-
-namespace tensorflow {
-namespace lattice {
-
-// MonotoneLatticeOp projects lattice param vectors onto the subspace that
-// satisfies the monotonicity constraints specified by is_monotone.
-// If is_monotone[k] == true, then the kth input has a non-decreasing
-// monotonicity constraint; if is_monotone[k] == false, then the kth input has
-// no monotonicity constraint.
-//
-// The lattice param tensor is expected to be a 2d tensor of shape
-// [num_outputs, num_parameters], where each row represents the parameter
-// vector of a multi-cell lattice.
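// [Editor's illustration, not in the original file: for a 2x2 lattice with
// is_monotone = {true, false}, the column-major dimension-0 constraints are
// weight[0] <= weight[1] and weight[2] <= weight[3], so the parameter row
// {3.0, 0.0, 2.0, 5.0} projects to {1.5, 1.5, 2.0, 5.0}; averaging the
// violating pair (3.0, 0.0) is the closest non-decreasing assignment in L2
// distance. This matches the ProjectTo0thDimension test deleted later in
// this patch.]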
-template <typename Dtype>
-class MonotoneLatticeOp : public LatticeOpBase<Dtype> {
- public:
-  static_assert(std::is_floating_point<Dtype>::value,
-                "Dtype needs to be a floating point");
-
-  explicit MonotoneLatticeOp(OpKernelConstruction* context);
-  void Compute(OpKernelContext* context) final;
-
-  TF_DISALLOW_COPY_AND_ASSIGN(MonotoneLatticeOp);
-
- private:
-  void ProjectionWorker(const Tensor& lattice_params_tensor, int start,
-                        int limit, int num_parameters,
-                        Tensor* projection_tensor,
-                        OpKernelContext* context) const;
-
-  std::unique_ptr<MonotoneLatticeProjector<Dtype>> projector_;
-};
-
-template <typename Dtype>
-MonotoneLatticeOp<Dtype>::MonotoneLatticeOp(OpKernelConstruction* context)
-    : LatticeOpBase<Dtype>(context) {
-  std::vector<bool> is_monotone;
-  float tolerance;
-  int64 max_iter;
-
-  OP_REQUIRES_OK(context, context->GetAttr("is_monotone", &is_monotone));
-  OP_REQUIRES_OK(context, context->GetAttr("tolerance", &tolerance));
-  OP_REQUIRES_OK(context, context->GetAttr("max_iter", &max_iter));
-
-  const int64 lattice_dim = this->GetLatticeStructure().Dimension();
-  OP_REQUIRES(context, (is_monotone.size() == lattice_dim),
-              errors::InvalidArgument(
-                  "lattice dimension: ", lattice_dim,
-                  " != is_monotone dimension: ", is_monotone.size()));
-
-  std::vector<int> monotone_dims;
-  for (int ii = 0; ii < lattice_dim; ++ii) {
-    if (is_monotone[ii]) {
-      monotone_dims.push_back(ii);
-    }
-  }
-
-  projector_ = std::unique_ptr<MonotoneLatticeProjector<Dtype>>(
-      new MonotoneLatticeProjector<Dtype>(this->GetLatticeStructure(),
-                                          monotone_dims, tolerance, max_iter));
-
-  constexpr int64 kInitCost = 20;
-  constexpr int64 kBaseCost = 20;
-  constexpr int64 kConstraintCost = 20;
-  // For initialization: constant0 * GetLatticeStructure().NumVertices().
-  // Each iteration of ADMM costs:
-  //   1. Projection for each constraint: constant1 * NumVertices().
-  //   2. Center variable update: constant2 * NumVertices().
-  //   3. Dual variable update for each constraint: constant3 * NumVertices().
-  // Therefore, the total cost of each iteration is
-  //   ((constant1 + constant3) * number of monotone dimensions + constant2) *
-  //   NumVertices().
-  // The number of iterations is bounded by min(max_iter, O(||true_projection -
-  // initial_point||_2 / epsilon)). Since the latter is hard to obtain, we use
-  // max_iter as an upper bound.
-  // So the total cost is given by
-  //
-  //   ((kConstraintCost * monotone_dims.size() + kBaseCost) * max_iter +
-  //    kInitCost) * GetLatticeStructure().NumVertices()
-  const int64 cost_per_example =
-      ((kConstraintCost * monotone_dims.size() + kBaseCost) * max_iter +
-       kInitCost) *
-      this->GetLatticeStructure().NumVertices();
-  this->SetCostPerExample(cost_per_example);
-}
-
-template <typename Dtype>
-void MonotoneLatticeOp<Dtype>::Compute(OpKernelContext* context) {
-  // Grab the param tensor. Expect a [num_outputs, num_parameters] tensor.
-  const Tensor& lattice_params_tensor = context->input(0);
-
-  OP_REQUIRES(context, lattice_params_tensor.dims() == 2,
-              errors::InvalidArgument("expected a 2d tensor, got ",
-                                      lattice_params_tensor.dims()));
-  OP_REQUIRES(
-      context,
-      lattice_params_tensor.dim_size(1) ==
-          this->GetLatticeStructure().NumVertices(),
-      errors::InvalidArgument(
-          "expected parameter dimension: ",
-          this->GetLatticeStructure().NumVertices(),
-          " got: ", lattice_params_tensor.dim_size(1)));
-  const int64 num_outputs = lattice_params_tensor.dim_size(0);
-  const int64 num_parameters = lattice_params_tensor.dim_size(1);
-
-  Tensor* projection_tensor = nullptr;
-  OP_REQUIRES_OK(context, context->allocate_output(
-                              0, TensorShape({num_outputs, num_parameters}),
-                              &projection_tensor));
-
-  auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads());
-
-  // A worker that projects lattice_params_tensor[start : limit - 1, :] and
-  // saves the result to projection_tensor[start : limit - 1, :].
-  // This lambda captures everything, including "this", so that it can call
-  // the ProjectionWorker method; all captured state outlives the Shard
-  // operation.
-  auto worker = [&](int start, int limit) {
-    ProjectionWorker(lattice_params_tensor, start, limit, num_parameters,
-                     projection_tensor, context);
-  };
-  // Launch threads.
-  Shard(worker_threads.num_threads, worker_threads.workers, num_outputs,
-        this->CostPerExample(), worker);
-}
-
-template <typename Dtype>
-void MonotoneLatticeOp<Dtype>::ProjectionWorker(
-    const Tensor& lattice_params_tensor, const int start, const int limit,
-    const int num_parameters, Tensor* projection_tensor,
-    OpKernelContext* context) const {
-  auto lattice_params_matrix = lattice_params_tensor.matrix<Dtype>();
-  auto projection_matrix = projection_tensor->matrix<Dtype>();
-  for (int row = start; row < limit; ++row) {
-    // Compute the projection for each row.
-    std::vector<Dtype> lattice_params_vec(num_parameters);
-    std::vector<Dtype> projected_lattice_params_vec(num_parameters, 0.0);
-
-    // Fetch the lattice parameters of this row.
-    for (int ii = 0; ii < num_parameters; ++ii) {
-      lattice_params_vec[ii] = lattice_params_matrix(row, ii);
-    }
-    OP_REQUIRES_OK(context,
-                   projector_->Project(lattice_params_vec,
-                                       &projected_lattice_params_vec));
-    // Fill in the projected params.
-    for (int ii = 0; ii < num_parameters; ++ii) {
-      projection_matrix(row, ii) = projected_lattice_params_vec[ii];
-    }
-  }
-}
-
-// Register kernels for float and double.
-REGISTER_KERNEL_BUILDER(
-    Name("MonotoneLattice").Device(DEVICE_CPU).TypeConstraint<float>("Dtype"),
-    MonotoneLatticeOp<float>);
-REGISTER_KERNEL_BUILDER(
-    Name("MonotoneLattice").Device(DEVICE_CPU).TypeConstraint<double>("Dtype"),
-    MonotoneLatticeOp<double>);
-
-}  // namespace lattice
-}  // namespace tensorflow
diff --git a/tensorflow_lattice/cc/kernels/monotonic_lattice_projections.h b/tensorflow_lattice/cc/kernels/monotonic_lattice_projections.h
deleted file mode 100644
index 767272e..0000000
--- a/tensorflow_lattice/cc/kernels/monotonic_lattice_projections.h
+++ /dev/null
@@ -1,326 +0,0 @@
-/* Copyright 2017 The TensorFlow Lattice Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-// Projects a lattice parameter vector onto monotonicity constraints.
-#ifndef TENSORFLOW_LATTICE_CC_KERNELS_MONOTONIC_LATTICE_PROJECTIONS_H_
-#define TENSORFLOW_LATTICE_CC_KERNELS_MONOTONIC_LATTICE_PROJECTIONS_H_
-
-#include <cmath>
-#include <limits>
-#include <type_traits>
-#include <vector>
-
-#include "tensorflow_lattice/cc/kernels/lattice_raw_iterator.h"
-#include "tensorflow_lattice/cc/kernels/monotonic_projections.h"
-#include "tensorflow_lattice/cc/lib/lattice_structure.h"
-
-#include "tensorflow/core/lib/core/errors.h"
-#include "tensorflow/core/lib/core/status.h"
-#include "tensorflow/core/platform/logging.h"
-
-namespace tensorflow {
-namespace lattice {
-
-// MonotoneLatticeProjector projects a lattice parameter vector onto the
-// monotonicity constraints specified by monotone_dimensions.
-// monotone_dimensions contains the indices of the (increasing) monotonic
-// dimensions. For example, to impose monotonicity constraints on the 0th and
-// 2nd dimensions, set monotone_dimensions = {0, 2}.
-//
-// The implementation uses Alternating Direction Method of Multipliers (ADMM)
-// parallel projection. See Distributed Optimization and Statistical Learning
-// via the Alternating Direction Method of Multipliers
-// (http://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf),
-// Section 5.1.2 (parallel projection) and Chapter 7, for the theoretical
-// background.
-//
-// Suppose we have K convex sets, C_1, ..., C_K, and we want to project a
-// variable x_0 in R^n onto the intersection of C_1, ..., C_K.
-// Let x_1, ..., x_K in R^n, and d_1, ..., d_K in R^n.
-// The ADMM parallel projection works as follows.
-//
-// Step 0: Initialize x_center = x_0, and d_k = 0 for all k = 1, ..., K.
-// Step 1: x_k <- Projection of (d_k + x_center) onto C_k for all k = 1, ...,
-//         K.
-// Step 2: x_center <- 0.5 * x_0 + 0.5 * 1/K * sum_k (x_k - d_k).
-// Step 3: d_k <- d_k + x_center - x_k.
-// Step 4: Go back to Step 1 if sum_k ||x_center - x_k||_1 > eps.
-//
-// Step 1 generates x_k in C_k, although x_k may not be in C_i for i != k.
-// The algorithm is guaranteed to converge, which implies d_k stops being
-// updated after enough iterations.
-// Therefore, x_center == x_k for all k eventually. Since x_center == x_1 ==
-// ... == x_K, we can conclude that x_center is in the intersection of C_1,
-// ..., C_K.
-// Step 2 generates the x_center that minimizes ||x_center - x_0||_2^2 plus
-// some regularization terms. Upon convergence, the regularization terms are
-// zero. Therefore, x_center == the projection of x_0 onto the intersection of
-// C_1, ..., C_K when the algorithm converges.
-//
-// In the following implementation, we set each C_k to be the set of
-// lattice_param_vec that satisfies the kth one-dimensional monotonicity
-// constraint. Assuming we have K monotone dimensions, the ADMM algorithm
-// performs the projection for a given lattice_param_vec as follows:
-// Step 0: Initialize center = lattice_param_vec and duals[k] =
-//         std::vector<Dtype>(param_size, 0.0) for k = 0, ..., K - 1.
-// Step 1: params[k] <- Projection of (duals[k] + center) onto the kth 1D
-//         monotonicity constraint (here + means elementwise summation), for
-//         k = 0, ..., K - 1.
-// Step 2: center <- 0.5 * lattice_param_vec + 0.5 * 1/K * sum_k (params[k] -
-//         duals[k])
-// Step 3: duals[k] += (center - params[k]) for k = 0, ..., K - 1.
-// Step 4: Repeat Step 1 until sum_k ||center - params[k]||_1 < epsilon, or -// Step 1 was repeated more than max_iter times. -template -class MonotoneLatticeProjector { - public: - static_assert(std::is_floating_point::value, - "Dtype needs to be a floating point"); - - explicit MonotoneLatticeProjector(const LatticeStructure& lattice_structure, - const std::vector& monotone_dimensions, - const Dtype epsilon = 1e-7, - const int64 max_iter = 100000); - - // Apply ADMM projections, and save the result to the projected_param. - Status Project(const std::vector& lattice_param_vec, - std::vector* projected_lattice_param_vec) const; - - private: - // This projector computes the projection of lattice parameter vector onto the - // per dimension monotonicity constraints. - // - // For example, consider 3 x 3 lattice: - // - // 2---------5--------8 - // | | | - // | | | - // 1---------4--------7 - // | | | - // | | | - // 0---------3--------6 - // - // For the 0th dimension, we have - // weight[0] <= weight[3] <= weight[6] - // weight[1] <= weight[4] <= weight[7] - // weight[2] <= weight[5] <= weight[8]. - // - // So PerDimensionProjector(lattice_structure, 0) will project the - // lattice_param_vec onto the constraints of the given dimension. - // - // For the 1th dimension, we have - // weight[0] <= weight[1] <= weight[2] - // weight[3] <= weight[4] <= weight[5] - // weight[6] <= weight[7] <= weight[8]. - // - // So PerDimensionProjector(lattice_structure, 1) will project - // lattice_param_vec onto the constraints of the given dimension. - class PerDimensionProjector { - public: - explicit PerDimensionProjector(const LatticeStructure& lattice_structure, - const int64 dimension); - - // Apply projection, and save the result to the lattice_param_vec. - void Project(std::vector* lattice_param_vec) const; - - private: - // Helper function that returns the base indices of a given LatticeStructure - // and dimension. - static std::vector BaseIndices( - const LatticeStructure& lattice_structure, const int64 dimension); - - const int64 lattice_size_; - const int64 stride_; - const std::vector base_indices_; - }; - - const Dtype epsilon_; - const int64 max_iter_; - int64 param_size_; - std::vector projectors_; -}; - -// Implementation of PerDimensionProjector's methods. -template -MonotoneLatticeProjector::PerDimensionProjector::PerDimensionProjector( - const LatticeStructure& lattice_structure, const int64 dimension) - : lattice_size_(lattice_structure.LatticeSize(dimension)), - stride_(lattice_structure.Stride(dimension)), - base_indices_(BaseIndices(lattice_structure, dimension)) {} - -template -std::vector -MonotoneLatticeProjector::PerDimensionProjector::BaseIndices( - const LatticeStructure& lattice_structure, const int64 dimension) { - std::vector base_indices; - - for (LatticeRawIterator iter(lattice_structure); !iter.IsDone(); - iter.Next()) { - if (iter.VertexDim(dimension) == 0) { - base_indices.push_back(iter.Index()); - } - } - return base_indices; -} - - -template -void MonotoneLatticeProjector::PerDimensionProjector::Project( - std::vector* lattice_param_vec_ptr) const { - DCHECK(lattice_param_vec_ptr); - - std::vector& lattice_param_vec = *lattice_param_vec_ptr; - for (const int64 base_index : base_indices_) { - std::vector lattice_slice(lattice_size_); - // Find the slice of lattice parameter vector. - int64 current_index = base_index; - for (Dtype& value : lattice_slice) { - value = lattice_param_vec[current_index]; - current_index += stride_; - } - - // Make a projection. 
- std::vector projected_slice = - VectorMonotonicProjection(lattice_slice, std::less_equal()); - - // Fill in the result. - current_index = base_index; - for (const Dtype value : projected_slice) { - lattice_param_vec[current_index] = value; - current_index += stride_; - } - } -} - -// Implementation of MonotoneLatticeProjector's methods. -template -MonotoneLatticeProjector::MonotoneLatticeProjector( - const LatticeStructure& lattice_structure, - const std::vector& monotone_dimensions, const Dtype epsilon, - const int64 max_iter) - : epsilon_(epsilon), - max_iter_(max_iter), - param_size_(lattice_structure.NumVertices()) { - for (const int dim : monotone_dimensions) { - projectors_.push_back(PerDimensionProjector(lattice_structure, dim)); - } -} - -// Apply ADMM projections. -template -Status MonotoneLatticeProjector::Project( - const std::vector& lattice_param_vec, - std::vector* projected_lattice_param_vec) const { - if (lattice_param_vec.size() != param_size_) { - return errors::InvalidArgument("lattice_param_vec's size (", - lattice_param_vec.size(), - ") != param_size (", param_size_, ")"); - } - - if (!projected_lattice_param_vec) { - return errors::InvalidArgument("projected_lattice_param_vec is nullptr"); - } - if (projected_lattice_param_vec->size() != param_size_) { - return errors::InvalidArgument("projected_lattice_param_vec's size (", - projected_lattice_param_vec->size(), - ") != param_size (", param_size_, ")"); - } - - // No projection at all. Make a deep copy, then return. - if (projectors_.empty()) { - *projected_lattice_param_vec = lattice_param_vec; - return Status::OK(); - } - - // Only one projection. No need for running a complicated projection. - if (projectors_.size() == 1) { - // Make a deep copy, then project. - *projected_lattice_param_vec = lattice_param_vec; - projectors_[0].Project(projected_lattice_param_vec); - return Status::OK(); - } - - // Initialize all variables. - // 1. Center: This contains a reference to the projected lattice parameter - // vector. - // 2. Param_per_cluster. - // 3. Deviation_per_cluster. - std::vector& center = *projected_lattice_param_vec; - const int param_size = lattice_param_vec.size(); - const int num_clusters = projectors_.size(); - - // Initial point is a deep copy of lattice_param_vec. - center = lattice_param_vec; - std::vector> param_per_cluster( - num_clusters, std::vector(param_size, 0.0)); - std::vector> duals(num_clusters, - std::vector(param_size, 0.0)); - - Dtype residual = std::numeric_limits::max(); - int64 iter = 0; - const Dtype average_scale = 0.5 / static_cast(num_clusters); - - - while (residual > epsilon_) { - // Step 1. Update parameter in each cluster by applying projections. - for (int ii = 0; ii < num_clusters; ++ii) { - // Step 1-1. Update param_per_cluster[ii] == center + duals[ii]. - const std::vector& duals_ii = duals[ii]; - std::vector& param_ii = param_per_cluster[ii]; - for (int jj = 0; jj < param_size; ++jj) { - param_ii[jj] = duals_ii[jj] + center[jj]; - } - // Step 1-2. Project onto the monotonicity constraint. - projectors_[ii].Project(¶m_ii); - } - - // Step 2. Update the center. 
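    // [Editor's worked equation, not in the original source: with
    // num_clusters = 2 we get average_scale = 0.5 / 2 = 0.25, so the code
    // below computes, per coordinate jj,
    //   center[jj] = 0.5 * lattice_param_vec[jj]
    //              + 0.25 * ((param_per_cluster[0][jj] - duals[0][jj]) +
    //                        (param_per_cluster[1][jj] - duals[1][jj])),
    // an equal blend of the original point and the average of the
    // dual-corrected per-constraint projections.]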
- // center = 1/2 * lattice_param_vec + 1/2 * (Average(param_per_cluster) - - // Average(dual)) - center.assign(param_size, 0); - for (int ii = 0; ii < num_clusters; ++ii) { - const std::vector& dual = duals[ii]; - const std::vector& param = param_per_cluster[ii]; - for (int jj = 0; jj < param_size; ++jj) { - center[jj] += (param[jj] - dual[jj]); - } - } - for (int ii = 0; ii < param_size; ++ii) { - center[ii] *= average_scale; - center[ii] += 0.5 * lattice_param_vec[ii]; - } - - // Step 3. Update the dual and residual - residual = 0; - for (int ii = 0; ii < num_clusters; ++ii) { - std::vector& dual = duals[ii]; - const std::vector& param = param_per_cluster[ii]; - for (int jj = 0; jj < param_size; ++jj) { - const Dtype diff = center[jj] - param[jj]; - dual[jj] += diff; - residual += std::abs(diff); - } - } - - ++iter; - if (iter > max_iter_) { - break; - } - } - return Status::OK(); -} - -} // namespace lattice -} // namespace tensorflow - -#endif // TENSORFLOW_LATTICE_CC_KERNELS_MONOTONIC_LATTICE_PROJECTIONS_H_ diff --git a/tensorflow_lattice/cc/kernels/monotonic_lattice_projections_test.cc b/tensorflow_lattice/cc/kernels/monotonic_lattice_projections_test.cc deleted file mode 100644 index 18e8525..0000000 --- a/tensorflow_lattice/cc/kernels/monotonic_lattice_projections_test.cc +++ /dev/null @@ -1,135 +0,0 @@ -/* Copyright 2017 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#include "tensorflow_lattice/cc/kernels/monotonic_lattice_projections.h" - -#include - -#include "tensorflow_lattice/cc/lib/lattice_structure.h" -#include "tensorflow/core/lib/core/status_test_util.h" -#include "tensorflow/core/lib/strings/str_util.h" -#include "tensorflow/core/platform/logging.h" -#include "tensorflow/core/platform/test.h" - -namespace tensorflow { -namespace lattice { -namespace { -TEST(MonotoneLatticeProjectorErrorTest, ProjectionWithNullptr) { - LatticeStructure lattice_structure(/*lattice_sizes=*/{2, 2}); - MonotoneLatticeProjector projector(lattice_structure, - /*monotone_dimensions=*/{}); - const Status s = - projector.Project(/*lattice_param_vec=*/{0, 1, 2, 3}, nullptr); - EXPECT_EQ(s.code(), error::INVALID_ARGUMENT); -} - -TEST(MonotoneLatticeProjectorErrorTest, ProjectionWithWrongInputDimension) { - LatticeStructure lattice_structure(/*lattice_sizes=*/{2, 2}); - MonotoneLatticeProjector projector(lattice_structure, - /*monotone_dimensions=*/{}); - std::vector output(4, 0.0); - const Status s = projector.Project(/*lattice_param_vec=*/{0, 1, 2}, &output); - EXPECT_EQ(s.code(), error::INVALID_ARGUMENT); -} - -TEST(MonotoneLatticeProjectorErrorTest, ProjectionWithWrongOutputDimension) { - LatticeStructure lattice_structure(/*lattice_sizes=*/{2, 2}); - MonotoneLatticeProjector projector(lattice_structure, - /*monotone_dimensions=*/{}); - std::vector output(3, 0.0); - const Status s = - projector.Project(/*lattice_param_vec=*/{0, 1, 2, 3}, &output); - EXPECT_EQ(s.code(), error::INVALID_ARGUMENT); -} - -// The fixture for testing MonotoneLatticeProjector. -class MonotoneLatticeProjectorTest : public ::testing::Test { - protected: - void CheckProjection( - const std::vector& lattice_sizes, - const std::vector& monotone_dimensions, - const std::vector& lattice_param_vec, - const std::vector& expected_projected_lattice_param_vec) { - LatticeStructure lattice_structure(lattice_sizes); - MonotoneLatticeProjector projector(lattice_structure, - monotone_dimensions, kEpsilon); - std::vector projected_lattice_param_vec(lattice_param_vec.size()); - TF_ASSERT_OK( - projector.Project(lattice_param_vec, &projected_lattice_param_vec)); - LOG(INFO) << "lattice param: " << str_util::Join(lattice_param_vec, ","); - LOG(INFO) << "Expected projected lattice param: " - << str_util::Join(expected_projected_lattice_param_vec, ","); - LOG(INFO) << "Projected lattice param: " - << str_util::Join(projected_lattice_param_vec, ","); - - ASSERT_EQ(projected_lattice_param_vec.size(), - expected_projected_lattice_param_vec.size()); - for (int ii = 0; ii < expected_projected_lattice_param_vec.size(); ++ii) { - EXPECT_NEAR(expected_projected_lattice_param_vec[ii], - projected_lattice_param_vec[ii], kEpsilon); - } - } - - private: - const float kEpsilon = 1e-5; -}; - -TEST_F(MonotoneLatticeProjectorTest, ProjectToNothing) { - CheckProjection( - /*lattice_sizes=*/{2, 2}, /*monotone_dimensions=*/{}, - /*lattice_param_vec=*/{3.0, 0.0, 2.0, 5.0}, - /*expected_projected_lattice_param_vec=*/{3.0, 0.0, 2.0, 5.0}); -} - -TEST_F(MonotoneLatticeProjectorTest, ProjectTo0thDimension) { - CheckProjection( - /*lattice_sizes=*/{2, 2}, /*monotone_dimensions=*/{0}, - /*lattice_param_vec=*/{3.0, 0.0, 2.0, 5.0}, - /*expected_projected_lattice_param_vec=*/{1.5, 1.5, 2.0, 5.0}); -} - -TEST_F(MonotoneLatticeProjectorTest, ProjectTo1stDimension) { - CheckProjection( - /*lattice_sizes=*/{2, 2}, /*monotone_dimensions=*/{1}, - 
/*lattice_param_vec=*/{3.0, 0.0, 2.0, 5.0}, - /*expected_projected_lattice_param_vec=*/{2.5, 0.0, 2.5, 5.0}); -} - -TEST_F(MonotoneLatticeProjectorTest, ProjectToAllDimensions) { - CheckProjection( - /*lattice_sizes=*/{2, 2}, /*monotone_dimensions=*/{0, 1}, - /*lattice_param_vec=*/{3.0, 0.0, 2.0, 5.0}, - /*expected_projected_lattice_param_vec=*/{1.5, 1.5, 2.0, 5.0}); -} - -TEST_F(MonotoneLatticeProjectorTest, ProjectThreeByTwoLatticeToAllDimensions) { - CheckProjection( - /*lattice_sizes=*/{3, 2}, /*monotone_dimensions=*/{0, 1}, - /*lattice_param_vec=*/{3.0, 1.0, 0.0, 0.0, 2.0, 5.0}, - /*expected_projected_lattice_param_vec=*/{1.0, 1.0, 1.0, 1.0, 2.0, 5.0}); -} - -TEST_F(MonotoneLatticeProjectorTest, - ProjectTwoByTwoByTwoLatticeToAllDimensions) { - CheckProjection( - /*lattice_sizes=*/{2, 2, 2}, - /*monotone_dimensions=*/{0, 1, 2}, - /*lattice_param_vec=*/{0.44, 0.3, 0.12, 3.33, 3.0, 0.0, 2.0, 5.0}, - /*expected_projected_lattice_param_vec=*/{0.28, 0.3, 0.28, 3.33, 1.5, 1.5, - 2.0, 5.0}); -} - -} // namespace -} // namespace lattice -} // namespace tensorflow diff --git a/tensorflow_lattice/cc/kernels/monotonic_projection_kernel.cc b/tensorflow_lattice/cc/kernels/monotonic_projection_kernel.cc deleted file mode 100644 index 018d9b9..0000000 --- a/tensorflow_lattice/cc/kernels/monotonic_projection_kernel.cc +++ /dev/null @@ -1,93 +0,0 @@ -/* Copyright 2017 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#include -#include -#include - -#include "tensorflow_lattice/cc/kernels/monotonic_projections.h" -#include "tensorflow/core/framework/op.h" -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/tensor_types.h" -#include "tensorflow/core/framework/types.pb.h" - -namespace tensorflow { -namespace lattice { - -namespace { - -template -bool CmpLesserOrEqual(const Dtype a, const Dtype b) { - return a <= b; -} - -template -bool CmpGreaterOrEqual(const Dtype a, const Dtype b) { - return a >= b; -} - -} // namespace - -template -class MonotonicProjectionOpKernel : public OpKernel { - public: - explicit MonotonicProjectionOpKernel(OpKernelConstruction* context) - : OpKernel(context) {} - - void Compute(OpKernelContext* context) override { - const Tensor& values_tensor = context->input(0); - const Tensor& increasing_tensor = context->input(1); - - OP_REQUIRES( - context, values_tensor.dims() == 1, - errors::InvalidArgument("values must have dims=1, got values.dims=", - values_tensor.dims())); - OP_REQUIRES(context, increasing_tensor.dims() == 0, - errors::InvalidArgument( - "increasing must be a boolean scalar, got increasing.dims=", - increasing_tensor.dims())); - OP_REQUIRES( - context, increasing_tensor.dtype() == DT_BOOL, - errors::InvalidArgument( - "increasing must be a boolean scalar, got increasing.dtype=", - DataType_Name(increasing_tensor.dtype()))); - - Tensor* monotonic_tensor = nullptr; - OP_REQUIRES_OK( - context, - context->allocate_output(0, values_tensor.shape(), &monotonic_tensor)); - - // Copy the current non-monotonic values and project them to monotonicity. - *monotonic_tensor = values_tensor; - if (increasing_tensor.scalar()()) { - TensorVectorMonotonicProjection(monotonic_tensor->vec(), - CmpLesserOrEqual); - } else { - TensorVectorMonotonicProjection(monotonic_tensor->vec(), - CmpGreaterOrEqual); - } - } -}; - -REGISTER_KERNEL_BUILDER(Name("MonotonicProjection") - .Device(DEVICE_CPU) - .TypeConstraint("Dtype"), - MonotonicProjectionOpKernel); -REGISTER_KERNEL_BUILDER(Name("MonotonicProjection") - .Device(DEVICE_CPU) - .TypeConstraint("Dtype"), - MonotonicProjectionOpKernel); - -} // namespace lattice -} // namespace tensorflow diff --git a/tensorflow_lattice/cc/kernels/monotonic_projections.h b/tensorflow_lattice/cc/kernels/monotonic_projections.h deleted file mode 100644 index 545ae6d..0000000 --- a/tensorflow_lattice/cc/kernels/monotonic_projections.h +++ /dev/null @@ -1,161 +0,0 @@ -/* Copyright 2017 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -// Functions that calculate monotonic projections. 
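[Editor's note] A freestanding sketch of the pool-adjacent-violators merge that MonotonicProjector::Insert implements below, specialized to the non-decreasing case (illustrative only, not part of the deleted file):

    #include <cstdio>
    #include <vector>

    int main() {
      // Projecting {3, 1, 2} onto non-decreasing vectors: 3 and 1 violate the
      // order and merge into a pool with mean 2; then 2 <= 2 holds, so the
      // pools render as {2, 2, 2}, the closest non-decreasing vector in L2.
      const std::vector<double> input = {3.0, 1.0, 2.0};
      std::vector<double> means;
      std::vector<int> sizes;
      for (const double v : input) {
        double sum = v;
        int size = 1;
        // Merge previous pools while they would break monotonicity.
        while (!means.empty() && means.back() > sum / size) {
          sum += means.back() * sizes.back();
          size += sizes.back();
          means.pop_back();
          sizes.pop_back();
        }
        means.push_back(sum / size);
        sizes.push_back(size);
      }
      for (int i = 0; i < static_cast<int>(means.size()); ++i) {
        for (int j = 0; j < sizes[i]; ++j) std::printf("%g ", means[i]);
      }
      std::printf("\n");  // prints: 2 2 2
    }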
-#ifndef TENSORFLOW_LATTICE_CC_KERNELS_MONOTONIC_PROJECTIONS_H_
-#define TENSORFLOW_LATTICE_CC_KERNELS_MONOTONIC_PROJECTIONS_H_
-
-#include <vector>
-
-#include "tensorflow/core/framework/tensor.h"
-
-namespace tensorflow {
-namespace lattice {
-
-// Converts a vector to a non-strictly monotonic vector that minimizes the
-// squared distance to the original vector values.
-//
-// monotonic_cmp is the comparison function that defines the direction of
-// the monotonicity. monotonic_cmp(a,b) should return true if a followed
-// by b is considered monotonic (equal values should always be considered
-// monotonic). monotonic_cmp should be transitive, and
-// monotonic_cmp(a,b) && monotonic_cmp(b,a) iff a == b.
-template <typename Dtype, typename CmpFn>
-std::vector<Dtype> VectorMonotonicProjection(const std::vector<Dtype>& input,
-                                             const CmpFn monotonic_cmp);
-
-// Converts a Tensor vector to a non-strictly monotonic vector that minimizes
-// the squared distance to the original vector values.
-//
-// monotonic_cmp is the comparison function that defines the direction of
-// the monotonicity. monotonic_cmp(a,b) should return true if a followed
-// by b is considered monotonic (equal values should always be considered
-// monotonic). monotonic_cmp should be transitive, and
-// monotonic_cmp(a,b) && monotonic_cmp(b,a) iff a == b.
-template <typename Dtype, typename CmpFn>
-void TensorVectorMonotonicProjection(typename TTypes<Dtype>::Vec values,
-                                     const CmpFn monotonic_cmp);
-
-// Converts a vector to a non-strictly monotonic vector that minimizes the
-// squared distance to the original vector values.
-//
-// Given a vector, input, it finds a non-strictly monotonic vector, output,
-// such that:
-//
-//   1. monotonic_cmp(output[i], output[i + 1]) == true for all 0 <= i < n - 1
-//      (e.g., output[0] <= output[1] <= ... <= output[n - 1])
-//   2. it minimizes || input - output ||_2
-//
-// This is a specialized implementation of the pool adjacent violators (PAV)
-// algorithm.
-//
-// To use it, one provides a comparison function (that defines the desired
-// monotonicity direction) and calls Insert() one value at a time, in order.
-//
-// At the end one can project the monotonic vector into a std::vector or
-// directly into a Tensor vector.
-template <typename Dtype, typename CmpFn>
-class MonotonicProjector {
- public:
-  // size is the size of the vector to be projected to monotonicity.
-  // monotonic_cmp is the comparison function that defines the direction of
-  // the monotonicity. monotonic_cmp(a,b) should return true if a followed
-  // by b is considered monotonic (equal values should always be considered
-  // monotonic). monotonic_cmp should be transitive, and
-  // monotonic_cmp(a,b) && monotonic_cmp(b,a) iff a == b.
-  explicit MonotonicProjector(const int size, const CmpFn monotonic_cmp)
-      : size_(size), monotonic_cmp_(monotonic_cmp) {
-    pool_list_.reserve(size);
-  }
-
-  // Inserts value at the end of the pool list, keeping the list monotonic
-  // according to monotonic_cmp_.
-  void Insert(Dtype value) {
-    Pool new_pool{1, value, value};
-    // While new_pool wouldn't be properly monotonic, merge the previous pool
-    // into it: if the last pool would break monotonicity, its values and
-    // new_pool's must share one mean.
-    while (!pool_list_.empty() &&
-           !monotonic_cmp_(pool_list_.back().mean, new_pool.mean)) {
-      new_pool.size += pool_list_.back().size;
-      new_pool.sum += pool_list_.back().sum;
-      new_pool.mean = new_pool.sum / new_pool.size;
-      pool_list_.pop_back();
-    }
-    pool_list_.push_back(new_pool);
-  }
-
-  // Copies monotonic projection to Tensor vector.
- void ProjectToTensorVector(typename TTypes::Vec output) { - int output_index = 0; - for (const auto& pool : pool_list_) { - for (const int limit = output_index + pool.size; output_index < limit; - ++output_index) { - output(output_index) = pool.mean; - } - } - } - - // Returns monotonic projection as vector. - std::vector ProjectToVector() { - std::vector output(size_); - int output_index = 0; - for (const auto& pool : pool_list_) { - for (const int limit = output_index + pool.size; output_index < limit; - ++output_index) { - output[output_index] = pool.mean; - } - } - return output; - } - - private: - struct Pool { - int size; // Number of elements in pool. - Dtype sum, mean; // Sum and mean of all values in pool. - }; - - const int size_; - std::vector pool_list_; - const CmpFn monotonic_cmp_; -}; - -// Implementation details - -// START_SKIP_DOXYGEN -template -std::vector VectorMonotonicProjection(const std::vector& input, - const CmpFn monotonic_cmp) { - MonotonicProjector projector(input.size(), monotonic_cmp); - for (const Dtype value : input) { - projector.Insert(value); - } - return projector.ProjectToVector(); -} - -template -void TensorVectorMonotonicProjection(typename TTypes::Vec values, - const CmpFn monotonic_cmp) { - MonotonicProjector projector(values.size(), monotonic_cmp); - for (int i = 0; i < values.size(); ++i) { - projector.Insert(values(i)); - } - projector.ProjectToTensorVector(values); -} -// END_SKIP_DOXYGEN - -} // namespace lattice -} // namespace tensorflow - -#endif // TENSORFLOW_LATTICE_CC_KERNELS_MONOTONIC_PROJECTIONS_H_ diff --git a/tensorflow_lattice/cc/kernels/pwl_indexing_calibrator_kernels.cc b/tensorflow_lattice/cc/kernels/pwl_indexing_calibrator_kernels.cc deleted file mode 100644 index aab1669..0000000 --- a/tensorflow_lattice/cc/kernels/pwl_indexing_calibrator_kernels.cc +++ /dev/null @@ -1,759 +0,0 @@ -/* Copyright 2017 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include -#include -#include -#include - -#include "tensorflow/core/framework/common_shape_fns.h" -#include "tensorflow/core/framework/op.h" -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/shape_inference.h" -#include "tensorflow/core/framework/tensor_types.h" -#include "tensorflow/core/lib/core/errors.h" -#include "tensorflow/core/util/sparse/sparse_tensor.h" -#include "tensorflow/core/util/work_sharder.h" - -namespace tensorflow { -namespace lattice { - -namespace { - -// Maximum number of points used by interpolation. It may use up to 3 when it's -// exactly on top of a keypoint input -- it returns also the left and right -// keypoints inputs (indices). See explanation below on -// FindExpandedInterpolation. -constexpr int kMaxNumInterpolationPoints = 3; - -// Changed with PwlSetDebugMode function. This variable forces each row of a -// batch to be processed by a separate worker, only used for testing. 
-bool test_force_split = false;
-
-}  // namespace
-
-extern void PwlSetTestMode(bool split_batches);
-void PwlSetTestMode(const bool split_batches) {
-  test_force_split = split_batches;
-}
-
-// Helper struct that holds all information needed to resolve one
-// interpolation: the number of consecutive points used (num_points), the
-// index of the first one (lower_index), and the associated weights -- not
-// used in every case.
-template <typename Dtype>
-struct InterpolationPoints {
-  int num_points;
-  int64_t lower_index;
-  Dtype weights[kMaxNumInterpolationPoints];
-};
-
-namespace {
-
-// Finds the interpolation points, but _not the weights_, for the given
-// uncalibrated value and keypoints inputs (kp_inputs).
-// The interpolation will be between kp_inputs[lower_index] and
-// kp_inputs[lower_index + 1], except outside the edges or when x
-// (uncalibrated) is exactly on top of a keypoint, in which case the function
-// returns 1 point.
-// It uses a simple binary search, so it is O(log(|kp_inputs|)).
-template <typename Dtype>
-InterpolationPoints<Dtype> FindInterpolationPoints(
-    const Dtype uncalibrated,
-    const typename TTypes<Dtype>::Vec& kp_inputs) {
-  if (uncalibrated <= kp_inputs(0)) {
-    return InterpolationPoints<Dtype>{1, 0};
-  }
-  const int64_t kp_inputs_last_idx =
-      static_cast<int64_t>(kp_inputs.size() - 1);
-  if (uncalibrated >= kp_inputs(kp_inputs_last_idx)) {
-    return InterpolationPoints<Dtype>{1, kp_inputs_last_idx};
-  }
-
-  // Binary search the keypoints inputs.
-  int64_t min_idx = 0, max_idx = kp_inputs.size();
-  while (max_idx > min_idx + 1) {
-    const int64_t idx = (max_idx + min_idx) / 2;
-    const Dtype value = kp_inputs(idx);
-    if (uncalibrated == value) {
-      return InterpolationPoints<Dtype>{1, idx};
-    }
-    if (uncalibrated > value) {
-      min_idx = idx;
-    } else {
-      max_idx = idx;
-    }
-  }
-
-  // Two points, where lower_index is min_idx.
-  return InterpolationPoints<Dtype>{2, min_idx};
-}
-
-// Finds interpolation points and associated weights for the given
-// uncalibrated value and keypoints inputs (kp_inputs).
-// Returns 1 interpolation point if uncalibrated is exactly on top of an
-// input keypoint (or beyond the edges), or 2 if in between two keypoints.
-// See FindInterpolationPoints.
-template <typename Dtype>
-InterpolationPoints<Dtype> FindInterpolationPointsWithWeights(
-    const Dtype uncalibrated,
-    const typename TTypes<Dtype>::Vec& kp_inputs) {
-  // Gets the points and calculates their weights.
-  InterpolationPoints<Dtype> interpolation_points =
-      FindInterpolationPoints(uncalibrated, kp_inputs);
-  if (interpolation_points.num_points == 1) {
-    // All weight goes to the single keypoint on which the uncalibrated value
-    // lies.
-    interpolation_points.weights[0] = 1.0;
-    return interpolation_points;
-  }
-
-  // assert(interpolation_points.num_points == 2)
-  // The piecewise linear interpolation weights (w) when x (uncalibrated)
-  // is in between two keypoints are given by:
-  //
-  //   w[lower_index] = 1.0 - theta(x, lower_index)
-  //   w[lower_index + 1] = theta(x, lower_index)
-  // Where:
-  //   theta(x, lower_index) = (x - kp_inputs[lower_index]) /
-  //       delta(lower_index)
-  //   delta(lower_index) = kp_inputs[lower_index + 1] - kp_inputs[lower_index]
-  //
-  // Note: the calibration later will apply the weights to the keypoints
-  // outputs, in the following format:
-  //
-  //   calibrated(x) = sum ( w(x) .* kp_outputs )
-  //
-  // So in this particular case, down the pipe, we'll have:
-  //
-  //   calibrated(x) = w[lower_index] * kp_outputs[lower_index] +
-  //                   w[lower_index + 1] * kp_outputs[lower_index + 1]
-  //
-  // And since w(x) is linear in x, calibrated(x) will be linear in x as well.
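  // [Editor's worked example, not in the original source: with
  // kp_inputs = {0, 1, 4} and x = 2, FindInterpolationPoints returns
  // lower_index = 1, so delta = 4 - 1 = 3 and theta = (2 - 1) / 3 = 1/3;
  // the resulting weights are w = {0, 2/3, 1/3}, which sum to 1.]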
- const Dtype delta = kp_inputs(interpolation_points.lower_index + 1) - - kp_inputs(interpolation_points.lower_index); - interpolation_points.weights[1] = - (uncalibrated - kp_inputs(interpolation_points.lower_index)) / delta; - interpolation_points.weights[0] = 1.0 - interpolation_points.weights[1]; - return interpolation_points; -} - -template -void IndexingCalibratorWorker( - const typename TTypes::Vec& kp_inputs, - const typename TTypes::Vec& uncalibrated_flat, const int start, - const int limit, typename TTypes::Tensor interpolation) { - // Loop over input weights. - for (int i = start; i < limit; i++) { - // Find interpolation lower_index and weights (weights). - const InterpolationPoints interpolation_points = - FindInterpolationPointsWithWeights(uncalibrated_flat(i), - kp_inputs); - - // Copy interpolation weights. - for (int j = 0; j < interpolation_points.num_points; j++) { - interpolation(i, interpolation_points.lower_index + j) = - interpolation_points.weights[j]; - } - } -} - -} // namespace - -template -class PwlIndexingCalibratorOpKernel : public OpKernel { - public: - explicit PwlIndexingCalibratorOpKernel(OpKernelConstruction* context) - : OpKernel(context) {} - - void Compute(OpKernelContext* context) override { - // Grab keypoints inputs: it provides the num_keypoints. - const Tensor& kp_inputs_tensor = context->input(1); - OP_REQUIRES(context, kp_inputs_tensor.dims() == 1, - errors::InvalidArgument( - "keypoints must have dims=1, got kp_inputs.dims=", - kp_inputs_tensor.dims())); - auto kp_inputs = kp_inputs_tensor.vec(); - const int num_keypoints = kp_inputs.size(); - - // Uncalibrated value(s): it provides the batch_size. - const Tensor& uncalibrated_tensor = context->input(0); - OP_REQUIRES( - context, uncalibrated_tensor.dims() == 1, - errors::InvalidArgument("input must have dims=1, got input.dims=", - uncalibrated_tensor.dims())); - const auto& uncalibrated_flat = uncalibrated_tensor.vec(); - const int64 batch_size = uncalibrated_flat.size(); - - // Output tensor. - Tensor* interpolation_tensor = nullptr; - OP_REQUIRES_OK( - context, - context->allocate_output( - 0, TensorShape({uncalibrated_flat.size(), num_keypoints}), - &interpolation_tensor)); - auto interpolation_tensor_flat = - interpolation_tensor->flat_inner_dims(); - interpolation_tensor_flat.setZero(); - - if (test_force_split) { - // Debug mode: do one example at a time. - for (int ii = 0; ii < batch_size; ii++) { - IndexingCalibratorWorker(kp_inputs, uncalibrated_flat, ii, - ii + 1, interpolation_tensor_flat); - } - } else { - // Sharded (multi-threaded) calculation: - auto worker_threads = - *(context->device()->tensorflow_cpu_worker_threads()); - // Cost is O(N) because of having to zero out the weights. - - constexpr int64 kBaseCost = 20; - constexpr int64 kCostPerKeypoint = 20; - const int64 cost_per_unit = kBaseCost + num_keypoints * kCostPerKeypoint; - Shard(worker_threads.num_threads, worker_threads.workers, batch_size, - cost_per_unit, [&kp_inputs, &uncalibrated_flat, - &interpolation_tensor_flat](int start, int limit) { - IndexingCalibratorWorker(kp_inputs, uncalibrated_flat, - start, limit, - interpolation_tensor_flat); - }); - } - } - - TF_DISALLOW_COPY_AND_ASSIGN(PwlIndexingCalibratorOpKernel); -}; - -namespace { - -// This worker computes the partial derivative w.r.t. input (uncalibrated). -// -// To simplify, let's call x the input uncalibrated value of one example -// (as opposed to the full batch). 
Let's call w(x) the vector of interpolation -// weights, returned by PwlIndexingCalibratorOp. -// -// The optimizer needs to find the gradient of loss(x), which can be written -// as loss(w(x)). -// -// d(loss)/dx = d(loss)/d(w) * d(w)/dx -// -// grad_wrt_weights is the d(loss)/d(w) and is given. So this function needs to -// calculate d(w)/dx in order to return d(loss)/dx. -// -// For the common case, x is in between two keypoints (kp_inputs). Recall from -// comment in FindInterpolation above that: -// -// w[lower_index] = 1.0 - theta -// w[lower_index + 1] = theta -// Where: -// theta = (x - keypoint_inputs[lower_index]) / delta[lower_index] -// delta[lower_index] = kp_inputs[lower_index + 1] - kp_inputs[lower_index] -// -// For d(w)/dx we have: -// d(w[i])/dx = 0, for i != lower_index and i != lower_index +1 -// -// And for i = lower_index and lower_index+1 (notice that kp_inputs are -// constants): -// -// d(w[lower_index])/dx = - 1/delta[lower_index] -// d(w[lower_index+1)])/dx = 1/delta[lower_index] -// -// Since d(loss)/dx = d(loss)/d(w) * d(w)/dx, d(loss)/d(w) = grad_wrt_weights, -// we have: -// -// d(loss)/dx = (grad_wrt_weights[lower_index+1] - -// grad_wrt_weights[lower_index]) / delta[lower_index] - -template -void IndexingCalibratorInputGradientWorker( - const typename TTypes::Vec& kp_inputs, - const typename TTypes::Vec& uncalibrated_vec, - const typename TTypes::Matrix& grad_wrt_weights_mat, - const int start, const int limit, - typename TTypes::Vec* grad_wrt_input_vec) { - const int num_keypoints = kp_inputs.size(); - - // Loop over examples (batch_index) of the batch. - for (int batch_index = start; batch_index < limit; batch_index++) { - // Simpler non-batch (single value) version: - const Dtype uncalibrated = uncalibrated_vec(batch_index); - - // Find interpolation lower_index and weights (weights). - const InterpolationPoints interpolation_points = - FindInterpolationPoints(uncalibrated, kp_inputs); - - // Input grad has to be multiplied by the output grad. - if (interpolation_points.num_points == 2) { - // Input is in between 2 keypoints. - const Dtype delta = kp_inputs(interpolation_points.lower_index + 1) - - kp_inputs(interpolation_points.lower_index); - (*grad_wrt_input_vec)(batch_index) = - (grad_wrt_weights_mat(batch_index, - interpolation_points.lower_index + 1) - - grad_wrt_weights_mat(batch_index, - interpolation_points.lower_index)) / - delta; - - } else { // assert(interpolation_points.num_points == 1) - // Input is exactly on top of a keypoint. d(w)/dx is not defined in this - // case, and what we do is to average the d(w)/dx that comes to the right - // of it and the d(w)/dx to the left of it. - // - // To the right of lower_index we have, from above: - // - // d(loss)/dx = (grad_wrt_weights[lower_index+1] - - // grad_wrt_weights[lower_index]) / delta[lower_index] - // - // And from the left: - // - // d(loss)/dx = (grad_wrt_weights[lower_index] - - // grad_wrt_weights[lower_index-1]) / delta[lower_index - 1] - // - // And we take a sub-gradient (or super-gradient), by averaging of those - // two gradients, except if the keypoint is in one of the edges (start - // or end of the kp_inputs), in which case we just get the d(w)/dx from - // the side we have. 
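      // [Editor's worked example, not in the original source: with
      // kp_inputs = {0, 1, 3}, x exactly on kp_inputs(1), and upstream
      // gradients grad_wrt_weights = {0.5, 1.0, 4.0} for this example, the
      // left slope is (1.0 - 0.5) / (1 - 0) = 0.5 and the right slope is
      // (4.0 - 1.0) / (3 - 1) = 1.5, so the averaged d(loss)/dx computed
      // below is 1.0.]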
- Dtype grad = 0.0; // == d(loss)/dx - int count = 0; - if (interpolation_points.lower_index > 0) { - const Dtype delta = kp_inputs(interpolation_points.lower_index) - - kp_inputs(interpolation_points.lower_index - 1); - grad = (grad_wrt_weights_mat(batch_index, - interpolation_points.lower_index) - - grad_wrt_weights_mat(batch_index, - interpolation_points.lower_index - 1)) / - delta; - ++count; - } - if (interpolation_points.lower_index < num_keypoints - 1) { - const Dtype delta = kp_inputs(interpolation_points.lower_index + 1) - - kp_inputs(interpolation_points.lower_index); - grad += (grad_wrt_weights_mat(batch_index, - interpolation_points.lower_index + 1) - - grad_wrt_weights_mat(batch_index, - interpolation_points.lower_index)) / - delta; - ++count; - } - if (count > 0) grad /= count; // Take mean. - (*grad_wrt_input_vec)(batch_index) = grad; - } - } -} -} // namespace - -template -class PwlIndexingCalibratorGradientOpKernel : public OpKernel { - public: - explicit PwlIndexingCalibratorGradientOpKernel(OpKernelConstruction* context) - : OpKernel(context) {} - - void Compute(OpKernelContext* context) override { - // Grab keypoints inputs: it provides the num_keypoints. - const Tensor& kp_inputs_tensor = context->input(1); - OP_REQUIRES(context, kp_inputs_tensor.dims() == 1, - errors::InvalidArgument( - "keypoints must have dims=1, got kp_inputs.dims=", - kp_inputs_tensor.dims())); - auto kp_inputs = kp_inputs_tensor.vec(); - const int num_keypoints = kp_inputs.size(); - - // Uncalibrated value(s): it provides the batch_size. - const Tensor& uncalibrated_tensor = context->input(0); - OP_REQUIRES( - context, uncalibrated_tensor.dims() == 1, - errors::InvalidArgument("input must have dims=1, got input.dims=", - uncalibrated_tensor.dims())); - const auto& uncalibrated_vec = uncalibrated_tensor.vec(); - const int64 batch_size = uncalibrated_vec.size(); - - // Gradient with respect to outputs, needed for chain rule. - const Tensor& grad_wrt_weights_tensor = context->input(2); - OP_REQUIRES( - context, grad_wrt_weights_tensor.dims() == 2, - errors::InvalidArgument("grad_wrt_weights_tensor must have dims=2, " - "got grad_wrt_weights_tensor.dims=", - grad_wrt_weights_tensor.dims())); - OP_REQUIRES( - context, grad_wrt_weights_tensor.dim_size(0) == batch_size, - errors::InvalidArgument( - "grad_wrt_weights_tensor (output gradient) has shape [batch_size=", - grad_wrt_weights_tensor.dim_size(0), - ", num_keypoints], expected batch_size=", batch_size, " instead")); - OP_REQUIRES( - context, grad_wrt_weights_tensor.dim_size(1) == num_keypoints, - errors::InvalidArgument( - "grad_wrt_weights_tensor (output gradient) has shape [batch_size, " - "num_keypoints=", - grad_wrt_weights_tensor.dim_size(1), "], expected num_keypoints=", - num_keypoints, " instead")); - const auto grad_wrt_weights_mat = grad_wrt_weights_tensor.matrix(); - - // Keypoints' inputs are fixed, so their gradient are always zero. - // The kp_inputs is of fixed size ([num_keypoints]) independent of the - // batch size. So the gradient wrt kp_inputs is summed over all batch, - // as opposed to the gradient wrt to the input. 
- Tensor* grad_wrt_kp_inputs = nullptr; - OP_REQUIRES_OK(context, - context->allocate_output(1, TensorShape({num_keypoints}), - &grad_wrt_kp_inputs)); - grad_wrt_kp_inputs->vec().setZero(); - - // Gradient with respect to input: - Tensor* grad_wrt_input_tensor = nullptr; - OP_REQUIRES_OK(context, - context->allocate_output(0, TensorShape({batch_size}), - &grad_wrt_input_tensor)); - auto grad_wrt_input_vec = grad_wrt_input_tensor->vec(); - - // Sharded (multi-threaded) calculation: - auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); - // Cost is O(N) because of having to zero out the weights. - - constexpr int64 kBaseCost = 20; - constexpr int64 kCostPerKeypoint = 20; - const int64 cost_per_unit = kBaseCost + num_keypoints * kCostPerKeypoint; - Shard(worker_threads.num_threads, worker_threads.workers, batch_size, - cost_per_unit, [&kp_inputs, &uncalibrated_vec, &grad_wrt_weights_mat, - &grad_wrt_input_vec](int start, int limit) { - IndexingCalibratorInputGradientWorker( - kp_inputs, uncalibrated_vec, grad_wrt_weights_mat, start, limit, - &grad_wrt_input_vec); - }); - } - - TF_DISALLOW_COPY_AND_ASSIGN(PwlIndexingCalibratorGradientOpKernel); -}; - -////////////////////////////////////////////////////////////////////////////// -// Sparse implementation -////////////////////////////////////////////////////////////////////////////// - -namespace { - -// Calculates expanded interpolation points: Similar to -// FindInterpolationPointsWithWeights above, but expand interpolation -// on exact keypoints to the ones around it. So it returns either 2 or -// 3 keypoints. -// The expansion is helpful in the sparse implementation because it makes the -// optimizing feature provide the d(loss)/d(w) for those points around, even -// when their weights are zero: see description of -// IndexingCalibratorInputGradientWorker above. -// Returns a InterpolationPoints struct with 2 or 3 points with the -// weights properly set. -template -InterpolationPoints FindExpandedInterpolationPointsWithWeights( - const Dtype uncalibrated, - const typename TTypes::Vec& kp_inputs) { - // Find interpolation points without expansion. - InterpolationPoints interpolation_points = - FindInterpolationPointsWithWeights(uncalibrated, kp_inputs); - - // Nothing changes for interpolation between keypoints. - if (interpolation_points.num_points == 2 || kp_inputs.size() == 1) { - return interpolation_points; - } - - // assert(interpolation_points.num_points == 1) - // Add second keypoint if on the first keypoint. - if (interpolation_points.lower_index == 0) { - interpolation_points.num_points = 2; - interpolation_points.weights[0] = 1; - interpolation_points.weights[1] = 0; - return interpolation_points; - } - - // Add second keypoint if on the last keypoint. - const auto kp_last = kp_inputs.size() - 1; - if (interpolation_points.lower_index == kp_last) { - interpolation_points.num_points = 2; - interpolation_points.lower_index--; - interpolation_points.weights[0] = 0; - interpolation_points.weights[1] = 1; - return interpolation_points; - } - - // Add keypoints on the sides when exactly on a middle keypoint. - interpolation_points.num_points = 3; - interpolation_points.lower_index--; - interpolation_points.weights[0] = 0; - interpolation_points.weights[1] = 1; - interpolation_points.weights[2] = 0; - return interpolation_points; -} - -// Calculates the gradient w.r.t the input, for the given interpolation points. -// This is a simple adaptation of IndexingCalibratorInputGradientWorker for -// sparse tensors. 
Please see the documentation in that function for the math -// details. -template -Dtype GradWRTInputSparse( - const int num_interpolation_points, - const typename TTypes::Vec& kp_inputs, const int64 lower_index, - const typename TTypes::Vec& grad_wrt_weights, - const int64 weights_base_idx) { - Dtype grad; - if (num_interpolation_points == 2) { - // Input is in between 2 keypoints. - const Dtype delta = kp_inputs(lower_index + 1) - kp_inputs(lower_index); - grad = (grad_wrt_weights(weights_base_idx + 1) - - grad_wrt_weights(weights_base_idx + 0)) / - delta; - - } else { - // assert(num_interpolation_points == 3) - // Input is exactly on top of a keypoint: average the slope of the - // previous and next keypoints: it's not correct since it is a point - // of discontinuity, but allows the weights to move. - const Dtype delta1 = kp_inputs(lower_index + 1) - kp_inputs(lower_index); - grad = (grad_wrt_weights(weights_base_idx + 1) - - grad_wrt_weights(weights_base_idx)) / - delta1; - - const Dtype delta2 = - kp_inputs(lower_index + 2) - kp_inputs(lower_index + 1); - grad += (grad_wrt_weights(weights_base_idx + 2) - - grad_wrt_weights(weights_base_idx + 1)) / - delta2; - - // Divided by 2 to get the mean of the gradients. - grad /= 2.0; - } - return grad; -} - -} // namespace - -template -class PwlIndexingCalibratorSparseOpKernel : public OpKernel { - public: - explicit PwlIndexingCalibratorSparseOpKernel(OpKernelConstruction* context) - : OpKernel(context) {} - - void Compute(OpKernelContext* context) override { - // Grab keypoints inputs. - const Tensor& kp_inputs_tensor = context->input(1); - OP_REQUIRES(context, kp_inputs_tensor.dims() == 1, - errors::InvalidArgument( - "keypoints must have dims=1, got kp_inputs.dims=", - kp_inputs_tensor.dims())); - auto kp_inputs = kp_inputs_tensor.vec(); - - // Uncalibrated value(s): it provides the batch_size. - const Tensor& uncalibrated_tensor = context->input(0); - OP_REQUIRES( - context, uncalibrated_tensor.dims() == 1, - errors::InvalidArgument("input must have dims=1, got input.dims=", - uncalibrated_tensor.dims())); - const auto& uncalibrated_vec = uncalibrated_tensor.vec(); - const int64 batch_size = uncalibrated_vec.size(); - - // Find interpolation points and weights for each uncalibrated - // value. - std::vector batch_lower_weight_indices(batch_size); - std::vector batch_weights(batch_size * kMaxNumInterpolationPoints); - std::vector batch_num_interpolation_points(batch_size); - int64 total_interpolation_points = 0; - for (int i = 0; i < batch_size; i++) { - const InterpolationPoints interpolation_points = - FindExpandedInterpolationPointsWithWeights(uncalibrated_vec(i), - kp_inputs); - for (int j = 0; j < interpolation_points.num_points; j++) { - batch_weights[total_interpolation_points + j] = - interpolation_points.weights[j]; - } - batch_num_interpolation_points[i] = interpolation_points.num_points; - batch_lower_weight_indices[i] = interpolation_points.lower_index; - total_interpolation_points += interpolation_points.num_points; - } - - // Copy interpolation weights into sparse tensor components: indices, - // weights. - // Build indices tensor: each index is a vector of 2 numbers: batch_index - // and the weight index. 
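    // [Editor's illustration, not in the original source: with 3 keypoints
    // kp_inputs = {0, 1, 4} and a batch {0.5, 1.0}, the first example
    // interpolates 2 points and the second (exactly on the middle keypoint)
    // expands to 3, so total_interpolation_points = 5 and
    //   indices = {{0, 0}, {0, 1}, {1, 0}, {1, 1}, {1, 2}},
    //   weights = {0.5, 0.5, 0, 1, 0}.]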
-    Tensor* tensor_indices = nullptr;
-    OP_REQUIRES_OK(
-        context,
-        context->allocate_output(
-            0, TensorShape({total_interpolation_points, 2}), &tensor_indices));
-    auto tensor_indices_mat = tensor_indices->matrix<int64>();
-    int64 sparse_index = 0;
-    for (int batch_index = 0; batch_index < batch_size; batch_index++) {
-      for (int col = 0; col < batch_num_interpolation_points[batch_index];
-           col++) {
-        tensor_indices_mat(sparse_index, 0) = batch_index;
-        tensor_indices_mat(sparse_index, 1) =
-            batch_lower_weight_indices[batch_index] + col;
-        sparse_index++;
-      }
-    }
-
-    // The weights in sparse-index order were already calculated in
-    // batch_weights, so we just need to copy them.
-    Tensor* tensor_weights = nullptr;
-    OP_REQUIRES_OK(
-        context,
-        context->allocate_output(1, TensorShape({total_interpolation_points}),
-                                 &tensor_weights));
-    // Notice batch_weights has some overhead space, so we copy only
-    // total_interpolation_points weights.
-    std::copy(batch_weights.begin(),
-              batch_weights.begin() + total_interpolation_points,
-              tensor_weights->flat<Dtype>().data());
-  }
-
-  TF_DISALLOW_COPY_AND_ASSIGN(PwlIndexingCalibratorSparseOpKernel);
-};
-
-template <typename Dtype>
-class PwlIndexingCalibratorSparseGradientOpKernel : public OpKernel {
- public:
-  explicit PwlIndexingCalibratorSparseGradientOpKernel(
-      OpKernelConstruction* context)
-      : OpKernel(context) {}
-
-  void Compute(OpKernelContext* context) override {
-    // Grab keypoints inputs: it provides the num_keypoints.
-    const Tensor& kp_inputs_tensor = context->input(1);
-    OP_REQUIRES(context, kp_inputs_tensor.dims() == 1,
-                errors::InvalidArgument(
-                    "keypoints must have dims=1, got kp_inputs.dims=",
-                    kp_inputs_tensor.dims()));
-    auto kp_inputs = kp_inputs_tensor.vec<Dtype>();
-    const int num_keypoints = kp_inputs.size();
-
-    // Uncalibrated value(s): it provides the batch_size.
-    const Tensor& uncalibrated_tensor = context->input(0);
-    OP_REQUIRES(
-        context, uncalibrated_tensor.dims() == 1,
-        errors::InvalidArgument("input must have dims=1, got input.dims=",
-                                uncalibrated_tensor.dims()));
-    const auto& uncalibrated_vec = uncalibrated_tensor.vec<Dtype>();
-    const int64 batch_size = uncalibrated_vec.size();
-
-    // Interpolation indices returned by the PwlIndexingCalibratorSparse op.
-    // It is a matrix where each row represents an interpolation point,
-    // given by (batch_index, weight_index), 0 <= batch_index < batch_size,
-    // 0 <= weight_index < kp_inputs.size().
-    const Tensor& interpolation_indices_tensor = context->input(2);
-    OP_REQUIRES(context, interpolation_indices_tensor.dims() == 2,
-                errors::InvalidArgument(
-                    "interpolation_indices must have dims=2, got input.dims=",
-                    uncalibrated_tensor.dims()));
-    const auto interpolation_indices =
-        interpolation_indices_tensor.matrix<int64>();
-    const int64 total_interpolation_points =
-        interpolation_indices_tensor.dim_size(0);
-
-    // Gradient with respect to outputs, needed for the chain rule. One value
-    // per sparse index in interpolation_indices.
-    const Tensor& grad_wrt_weights_tensor = context->input(3);
-    const auto grad_wrt_weights = grad_wrt_weights_tensor.vec<Dtype>();
-    OP_REQUIRES(
-        context, grad_wrt_weights.size() == total_interpolation_points,
-        errors::InvalidArgument("grad_wrt_weights (", grad_wrt_weights.size(),
-                                " elements) must have as many elements as the "
-                                "total number of interpolation indices (",
-                                total_interpolation_points, " elements)"));
-
-    // Keypoints' inputs are fixed, so their gradients are always zero. Fixed
-    // size, invariant to the size of the batch.
-    Tensor* grad_wrt_kp_inputs = nullptr;
-    OP_REQUIRES_OK(context,
-                   context->allocate_output(1, TensorShape({num_keypoints}),
-                                            &grad_wrt_kp_inputs));
-    grad_wrt_kp_inputs->vec<Dtype>().setZero();
-
-    // Gradient with respect to inputs is dense and of the same dimension as
-    // the input, that is, batch_size.
-    Tensor* grad_wrt_input_tensor = nullptr;
-    OP_REQUIRES_OK(context,
-                   context->allocate_output(0, TensorShape({batch_size}),
-                                            &grad_wrt_input_tensor));
-    auto grad_wrt_input = grad_wrt_input_tensor->vec<Dtype>();
-
-    // Each interpolation point is associated with one weight in
-    // grad_wrt_weights and a pair of indices (batch index, lower_index) in
-    // interpolation_indices.
-    int64 sparse_index = 0;  // Loops over all interpolation points.
-    while (sparse_index < total_interpolation_points) {
-      const int batch_index = interpolation_indices(sparse_index, 0);
-      OP_REQUIRES(context, batch_index >= 0 && batch_index < batch_size,
-                  errors::InvalidArgument(
-                      "invalid batch_index for sparse "
-                      "interpolation, expected 0 <= batch_index < ",
-                      batch_size, ", got ", batch_index));
-      const int64 weights_base_idx = sparse_index;
-      int64 lower_index = interpolation_indices(sparse_index, 1);
-      int num_interpolation_points = 0;
-      do {
-        num_interpolation_points++;
-        sparse_index++;
-      } while (sparse_index < total_interpolation_points &&
-               interpolation_indices(sparse_index, 0) == batch_index &&
-               num_interpolation_points < kMaxNumInterpolationPoints + 1);
-      OP_REQUIRES(
-          context,
-          num_interpolation_points == 2 || num_interpolation_points == 3,
-          errors::InvalidArgument(
-              "only interpolations with 2 or 3 points are supported, got ",
-              num_interpolation_points));
-      grad_wrt_input(batch_index) = GradWRTInputSparse(
-          num_interpolation_points, kp_inputs, lower_index, grad_wrt_weights,
-          weights_base_idx);
-    }
-  }
-
-  TF_DISALLOW_COPY_AND_ASSIGN(PwlIndexingCalibratorSparseGradientOpKernel);
-};
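The two- and three-point branches of GradWRTInputSparse above are just the chain rule applied to linear interpolation between keypoints. Here is a minimal standalone sketch of the two-point arithmetic, outside the TensorFlow kernel machinery (illustrative only; the function name and the sample numbers are made up for this note):

    #include <iostream>

    // For an input x between keypoints k0 < k1, the interpolation weights are
    // w1 = (x - k0) / (k1 - k0) and w0 = 1 - w1, so d(w0)/dx = -1 / (k1 - k0)
    // and d(w1)/dx = +1 / (k1 - k0). The chain rule then gives
    // d(loss)/dx = (d(loss)/d(w1) - d(loss)/d(w0)) / (k1 - k0).
    double GradWrtInputTwoPoints(double kp_lower, double kp_upper,
                                 double grad_wrt_w_lower,
                                 double grad_wrt_w_upper) {
      return (grad_wrt_w_upper - grad_wrt_w_lower) / (kp_upper - kp_lower);
    }

    int main() {
      // Keypoints at 1.0 and 3.0, upstream gradients 0.2 and 0.8:
      // prints 0.3 = (0.8 - 0.2) / (3.0 - 1.0).
      std::cout << GradWrtInputTwoPoints(1.0, 3.0, 0.2, 0.8) << "\n";
    }

The three-point branch in GradWRTInputSparse averages two such slopes, matching its comment about taking the mean of the gradients on either side of a keypoint.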
-//////////////////////////////////////////////////////////////////////////////
-// Kernels registration for all operations defined here.
-//////////////////////////////////////////////////////////////////////////////
-REGISTER_KERNEL_BUILDER(Name("PwlIndexingCalibrator")
-                            .Device(DEVICE_CPU)
-                            .TypeConstraint<float>("Dtype"),
-                        PwlIndexingCalibratorOpKernel<float>);
-REGISTER_KERNEL_BUILDER(Name("PwlIndexingCalibrator")
-                            .Device(DEVICE_CPU)
-                            .TypeConstraint<double>("Dtype"),
-                        PwlIndexingCalibratorOpKernel<double>);
-
-REGISTER_KERNEL_BUILDER(Name("PwlIndexingCalibratorGradient")
-                            .Device(DEVICE_CPU)
-                            .TypeConstraint<float>("Dtype"),
-                        PwlIndexingCalibratorGradientOpKernel<float>);
-REGISTER_KERNEL_BUILDER(Name("PwlIndexingCalibratorGradient")
-                            .Device(DEVICE_CPU)
-                            .TypeConstraint<double>("Dtype"),
-                        PwlIndexingCalibratorGradientOpKernel<double>);
-
-REGISTER_KERNEL_BUILDER(Name("PwlIndexingCalibratorSparse")
-                            .Device(DEVICE_CPU)
-                            .TypeConstraint<float>("Dtype"),
-                        PwlIndexingCalibratorSparseOpKernel<float>);
-REGISTER_KERNEL_BUILDER(Name("PwlIndexingCalibratorSparse")
-                            .Device(DEVICE_CPU)
-                            .TypeConstraint<double>("Dtype"),
-                        PwlIndexingCalibratorSparseOpKernel<double>);
-
-REGISTER_KERNEL_BUILDER(Name("PwlIndexingCalibratorSparseGradient")
-                            .Device(DEVICE_CPU)
-                            .TypeConstraint<float>("Dtype"),
-                        PwlIndexingCalibratorSparseGradientOpKernel<float>);
-REGISTER_KERNEL_BUILDER(Name("PwlIndexingCalibratorSparseGradient")
-                            .Device(DEVICE_CPU)
-                            .TypeConstraint<double>("Dtype"),
-                        PwlIndexingCalibratorSparseGradientOpKernel<double>);
-
-}  // namespace lattice
-}  // namespace tensorflow
diff --git a/tensorflow_lattice/cc/kernels/simplex_interpolation_kernels.cc b/tensorflow_lattice/cc/kernels/simplex_interpolation_kernels.cc
deleted file mode 100644
index 8539a8c..0000000
--- a/tensorflow_lattice/cc/kernels/simplex_interpolation_kernels.cc
+++ /dev/null
@@ -1,285 +0,0 @@
-/* Copyright 2017 The TensorFlow Lattice Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#include <algorithm>
-#include <cmath>
-#include <numeric>
-#include <vector>
-
-#include "tensorflow_lattice/cc/kernels/lattice_interpolation_base.h"
-#include "tensorflow_lattice/cc/lib/lattice_structure.h"
-#include "tensorflow/core/framework/op_kernel.h"
-#include "tensorflow/core/framework/tensor.h"
-
-namespace tensorflow {
-namespace lattice {
-
-namespace {
-
-// Returns the permutation such that
-// values[permutation[0]] >= ... >= values[permutation[d - 1]], where
-// d == values.size().
-template <typename Dtype>
-std::vector<int64> DescendingPermutation(const std::vector<Dtype>& values) {
-  std::vector<int64> permutation(values.size());
-  std::iota(permutation.begin(), permutation.end(), 0);
-
-  auto cmp = [&values](const int64 left, const int64 right) -> bool {
-    return values[left] > values[right];
-  };
-
-  std::sort(permutation.begin(), permutation.end(), cmp);
-
-  return permutation;
-}
-
-}  // namespace
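DescendingPermutation uses the standard iota-plus-sort idiom for an argsort. A standalone usage sketch of the same pattern (illustrative only; it uses plain int indices instead of the TensorFlow int64 alias):

    #include <algorithm>
    #include <iostream>
    #include <numeric>
    #include <vector>

    int main() {
      const std::vector<double> values = {0.2, 0.9, 0.5};
      std::vector<int> permutation(values.size());
      std::iota(permutation.begin(), permutation.end(), 0);  // {0, 1, 2}
      std::sort(permutation.begin(), permutation.end(),
                [&values](int left, int right) {
                  return values[left] > values[right];
                });
      // Prints "1 2 0": values[1]=0.9 >= values[2]=0.5 >= values[0]=0.2.
      for (int idx : permutation) std::cout << idx << " ";
      std::cout << "\n";
    }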
-// SimplexInterpolationOpKernel returns interpolation weights.
-template <typename Dtype>
-class SimplexInterpolationOpKernel : public LatticeInterpolationOpBase<Dtype> {
- public:
-  explicit SimplexInterpolationOpKernel(OpKernelConstruction* context)
-      : LatticeInterpolationOpBase<Dtype>(context) {
-    constexpr int64 kBaseCost = 20;
-    constexpr int64 kCostPerCellVertex = 20;
-    constexpr int64 kWeightInitializationCost = 1;
-    const int64 input_dim = this->GetLatticeStructure().Dimension();
-    this->SetCostPerExample(kCostPerCellVertex * input_dim * log(input_dim) +
-                            kWeightInitializationCost *
-                                this->GetLatticeStructure().NumVertices() +
-                            kBaseCost);
-  }
-
- private:
-  InterpolationWeights<Dtype> ComputeInterpolationWeights(
-      const LatticeStructure& lattice_structure,
-      typename TTypes<Dtype>::UnalignedConstFlat input_vector) const final;
-
-  TF_DISALLOW_COPY_AND_ASSIGN(SimplexInterpolationOpKernel);
-};
-
-// SimplexGradientOpKernel returns the gradient with respect to the
-// input. See details in CalculateGradientWrtInput above.
-template <typename Dtype>
-class SimplexGradientOpKernel : public LatticeGradientOpBase<Dtype> {
- public:
-  explicit SimplexGradientOpKernel(OpKernelConstruction* context)
-      : LatticeGradientOpBase<Dtype>(context) {
-    constexpr int64 kBaseCost = 20;
-    constexpr int64 kCostPerCellVertex = 20;
-    const int64 input_dim = this->GetLatticeStructure().Dimension();
-    this->SetCostPerExample(kCostPerCellVertex * input_dim * log(input_dim) +
-                            kBaseCost);
-  }
-
- private:
-  std::vector<Dtype> ComputeGradWrtInput(
-      const LatticeStructure& lattice_structure,
-      typename TTypes<Dtype>::UnalignedConstFlat input_vector,
-      typename TTypes<Dtype>::UnalignedConstFlat weight_vector,
-      typename TTypes<Dtype>::UnalignedConstFlat grad_wrt_weight_vector)
-      const final;
-
-  TF_DISALLOW_COPY_AND_ASSIGN(SimplexGradientOpKernel);
-};
-
-// Produces simplex interpolation weights for an input that is in the unit
-// hypercube (the residual), as well as the corresponding indices in the
-// lattice (based on the bottom_corner). See
-// http://jmlr.org/papers/v17/15-243.html for more details.
-//
-// Calculating the linear interpolation weights
-// --------------------------------------------
-// We compute the linear interpolation weights using Lovasz's extension.
-// The formula for Lovasz's extension:
-// 1. Find the permutation such that
-//
-//      input[permutation[0]] >= ... >= input[permutation[d-1]]
-//
-// 2. Assign the weights such that
-//
-//      weight on e0 = 1 - input[permutation[0]]
-//      weight on e0 + e[permutation[0]] = input[permutation[0]] -
-//        input[permutation[1]]
-//      ...
-//      weight on e0 + \sum_{i=0}^{k} e[permutation[i]] =
-//        input[permutation[k]] - input[permutation[k + 1]]
-//      ...
-//      weight on e0 + \sum_{i=0}^{d - 1} e[permutation[i]] =
-//        input[permutation[d - 1]]
-//
-// where e0 = [0,...,0] and e[i] = [0,...,1,...,0] is the vector whose ith
-// component is 1.
-// (Note that the weights lie in the 2 ** d dimensional probability simplex,
-// hence they form a valid interpolation weight vector.)
-//
-// This is equivalent to partitioning the hypercube into d! simplices, where
-// each simplex has d+1 vertices, and each simplex's vertices include the
-// all-zeros vertex, one vertex with a single one, one vertex with two ones,
-// ..., and the all-ones vertex.
-//
-// For example, for a three-dimensional unit hypercube the 3! = 6 simplices
-// are:
-//   1: [0,0,0], [0,0,1], [0,1,1], [1,1,1]
-//   2: [0,0,0], [0,0,1], [1,0,1], [1,1,1]
-//   3: [0,0,0], [0,1,0], [0,1,1], [1,1,1]
-//   4: [0,0,0], [0,1,0], [1,1,0], [1,1,1]
-//   5: [0,0,0], [1,0,0], [1,1,0], [1,1,1]
-//   6: [0,0,0], [1,0,0], [1,0,1], [1,1,1]
-//
-// Thus we can specify one of the d! simplices by a d-dim vector stating the
-// order in which the vertices add 1. In the example above, the first simplex
-// can be specified as [2,1,0], and the second simplex as [2,0,1].
-//
-// For the first simplex, the weights are given by
-//
-//   weight on [0,0,0] = 1 - input[2]
-//   weight on [0,0,1] = input[2] - input[1]
-//   weight on [0,1,1] = input[1] - input[0]
-//   weight on [1,1,1] = input[0]
-//   weight on others  = 0.
-//
-// For the second simplex, the weights are given by
-//
-//   weight on [0,0,0] = 1 - input[2]
-//   weight on [0,0,1] = input[2] - input[0]
-//   weight on [1,0,1] = input[0] - input[1]
-//   weight on [1,1,1] = input[1]
-//   weight on others  = 0.
-//
-// An extension to the multi-cell case is done by
-//   1. Finding the bottom corner index and the residual vector.
-//   2. Computing the interpolation weights using the residual vector.
-//   3. Modifying e[i] = strides[i] + bottom_corner_index.
-
-template <typename Dtype>
-InterpolationWeights<Dtype>
-SimplexInterpolationOpKernel<Dtype>::ComputeInterpolationWeights(
-    const LatticeStructure& lattice_structure,
-    typename TTypes<Dtype>::UnalignedConstFlat input) const {
-  const BottomCornerIndexAndResidual<Dtype> bottom_corner_index_and_residual =
-      lattice_structure.GetBottomCornerIndexAndResidual<Dtype>(input);
-  const std::vector<Dtype>& residual =
-      bottom_corner_index_and_residual.residual;
-  const std::vector<int64> descending_permutation =
-      DescendingPermutation(residual);
-
-  const int64 input_dim = lattice_structure.Dimension();
-  // The interpolation weight vector contains up to d + 1 non-zero elements.
-  // Number of non-zero weights.
-  const int64 nnz_weight = input_dim + 1;
-  InterpolationWeights<Dtype> interpolation_weights;
-  std::vector<int64>& index = interpolation_weights.indices;
-  std::vector<Dtype>& weight = interpolation_weights.weights;
-  index.resize(nnz_weight);
-  weight.resize(nnz_weight);
-
-  Dtype current_residual = 1.0;
-  int64 current_index = bottom_corner_index_and_residual.bottom_corner_index;
-  const std::vector<int64>& strides = lattice_structure.Strides();
-  for (int ii = 0; ii < input_dim; ++ii) {
-    const int64 current_dim = descending_permutation[ii];
-    const Dtype next_residual = residual[current_dim];
-    // Assigning index and weight.
-    index[ii] = current_index;
-    weight[ii] = current_residual - next_residual;
-    // Proceed to the next item.
-    current_index += strides[current_dim];
-    current_residual = next_residual;
-  }
-  // The boundary case.
-  index[input_dim] = current_index;
-  weight[input_dim] = current_residual;
-
-  return interpolation_weights;
-}
-
-// The goal of the gradient op is, given grad_wrt_weight:
-//   (dy / dweight[0], dy / dweight[1], dy / dweight[2], dy / dweight[3]),
-// to compute the grad_wrt_input:
-//   (dy / dx[0], ..., dy / dx[D-1]).
-//
-// We know that:
-//   dy/dx[jj] = sum_{ii \in weights} dy/dweight[ii] * dweight[ii]/dx[jj]
-//
-// So we need to calculate dweight[ii]/dx[jj]. Let us consider the 2 x 2
-// lattice case first. Recall that
-//
-//   weight = \sum_k input[k] * (e[permutation[k + 1]] - e[permutation[k]])
-//
-// which is a linear function in input[k]. Therefore the gradient can be
-// computed easily once we have the permutation.
-//
-// The boundary case (out_of_bound):
-// When input[k] > 1 or input[k] < 0, we assign the zero gradient.
-//
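To make the Lovasz-extension formulas above concrete, here is a self-contained sketch that computes the simplex weights for a single unit cell (illustrative only; it assumes the input is already the residual inside the unit hypercube, and it skips the stride and bottom-corner bookkeeping the real kernel performs):

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <numeric>
    #include <vector>

    int main() {
      const std::vector<double> input = {0.2, 0.9, 0.5};  // Residual in [0,1]^3.

      // Descending permutation of the inputs: {1, 2, 0}.
      std::vector<int> perm(input.size());
      std::iota(perm.begin(), perm.end(), 0);
      std::sort(perm.begin(), perm.end(),
                [&input](int l, int r) { return input[l] > input[r]; });

      // d + 1 weights that sum to 1: weight[0] falls on e0, and weight[k]
      // falls on e0 + e[perm[0]] + ... + e[perm[k - 1]].
      std::vector<double> weight(input.size() + 1);
      double current = 1.0;
      for (std::size_t ii = 0; ii < input.size(); ++ii) {
        weight[ii] = current - input[perm[ii]];
        current = input[perm[ii]];
      }
      weight.back() = current;

      // Prints "0.1 0.4 0.3 0.2": the weights on the vertices
      // [0,0,0], [0,1,0], [0,1,1], [1,1,1], respectively.
      for (double w : weight) std::cout << w << " ";
      std::cout << "\n";
    }

Note that the four weights are non-negative and sum to 1, as the probability-simplex remark above requires.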
-
-template <typename Dtype>
-std::vector<Dtype> SimplexGradientOpKernel<Dtype>::ComputeGradWrtInput(
-    const LatticeStructure& lattice_structure,
-    typename TTypes<Dtype>::UnalignedConstFlat input,
-    typename TTypes<Dtype>::UnalignedConstFlat unused_weight,
-    typename TTypes<Dtype>::UnalignedConstFlat grad_wrt_weight) const {
-  const BottomCornerIndexAndResidual<Dtype> bottom_corner_index_and_residual =
-      lattice_structure.GetBottomCornerIndexAndResidual<Dtype>(input);
-  const std::vector<Dtype>& residual =
-      bottom_corner_index_and_residual.residual;
-  const std::vector<int64> descending_permutation =
-      DescendingPermutation(residual);
-
-  const int64 input_dim = lattice_structure.Dimension();
-  int64 current_index = bottom_corner_index_and_residual.bottom_corner_index;
-  Dtype current_coefficient = grad_wrt_weight(current_index);
-  const std::vector<int64>& strides = lattice_structure.Strides();
-  const std::vector<bool>& out_of_bound =
-      bottom_corner_index_and_residual.out_of_bound;
-
-  // Initialization.
-  std::vector<Dtype> grad_wrt_input(input_dim, 0.0);
-  for (const int64 current_dim : descending_permutation) {
-    current_index += strides[current_dim];
-    const Dtype next_coefficient = grad_wrt_weight(current_index);
-    // Only update the gradient if it is not out of bound.
-    if (!out_of_bound[current_dim]) {
-      grad_wrt_input[current_dim] = (next_coefficient - current_coefficient);
-    }
-    current_coefficient = next_coefficient;
-  }
-  return grad_wrt_input;
-}
-
-// Register kernels for float and double.
-REGISTER_KERNEL_BUILDER(Name("SimplexInterpolation")
-                            .Device(DEVICE_CPU)
-                            .TypeConstraint<float>("Dtype"),
-                        SimplexInterpolationOpKernel<float>);
-
-REGISTER_KERNEL_BUILDER(Name("SimplexInterpolation")
-                            .Device(DEVICE_CPU)
-                            .TypeConstraint<double>("Dtype"),
-                        SimplexInterpolationOpKernel<double>);
-
-REGISTER_KERNEL_BUILDER(
-    Name("SimplexGradient").Device(DEVICE_CPU).TypeConstraint<float>("Dtype"),
-    SimplexGradientOpKernel<float>);
-
-REGISTER_KERNEL_BUILDER(
-    Name("SimplexGradient").Device(DEVICE_CPU).TypeConstraint<double>("Dtype"),
-    SimplexGradientOpKernel<double>);
-
-}  // namespace lattice
-}  // namespace tensorflow
diff --git a/tensorflow_lattice/cc/lib/BUILD b/tensorflow_lattice/cc/lib/BUILD
deleted file mode 100644
index 5580cdd..0000000
--- a/tensorflow_lattice/cc/lib/BUILD
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================== -licenses(["notice"]) # Apache 2.0 - -package( - default_visibility = [ - "//tensorflow_lattice:__subpackages__", - ], -) - -cc_library( - name = "lattice_structure", - srcs = ["lattice_structure.cc"], - hdrs = ["lattice_structure.h"], - deps = [ - "@org_tensorflow//tensorflow/core:framework_headers_lib", - "@org_tensorflow//tensorflow/core:framework_lite", - "@protobuf_archive//:protobuf", - ], -) diff --git a/tensorflow_lattice/cc/lib/lattice_structure.cc b/tensorflow_lattice/cc/lib/lattice_structure.cc deleted file mode 100644 index e9029c6..0000000 --- a/tensorflow_lattice/cc/lib/lattice_structure.cc +++ /dev/null @@ -1,51 +0,0 @@ -/* Copyright 2017 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow_lattice/cc/lib/lattice_structure.h" - -#include -#include - -#include "tensorflow/core/framework/tensor.h" - -namespace tensorflow { -namespace lattice { - -LatticeStructure::LatticeStructure(const std::vector& lattice_sizes) - : lattice_sizes_(lattice_sizes) { - dimension_ = lattice_sizes_.size(); - strides_.resize(dimension_); - num_vertices_ = 1; - for (int ii = 0; ii < dimension_; ++ii) { - strides_[ii] = num_vertices_; - num_vertices_ *= lattice_sizes_[ii]; - } - num_vertices_per_cell_ = 1 << dimension_; -} - -bool LatticeStructure::IsValidLatticeSizes( - const std::vector& lattice_sizes) { - if (lattice_sizes.empty()) { - return false; - } - for (int size : lattice_sizes) { - if (size < 2) { - return false; - } - } - return true; -} - -} // namespace lattice -} // namespace tensorflow diff --git a/tensorflow_lattice/cc/lib/lattice_structure.h b/tensorflow_lattice/cc/lib/lattice_structure.h deleted file mode 100644 index 1ea8176..0000000 --- a/tensorflow_lattice/cc/lib/lattice_structure.h +++ /dev/null @@ -1,159 +0,0 @@ -/* Copyright 2017 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -// Lattice structure class that represents a lattice with column-major indexing. 
-#ifndef TENSORFLOW_LATTICE_CC_LIB_LATTICE_STRUCTURE_H_
-#define TENSORFLOW_LATTICE_CC_LIB_LATTICE_STRUCTURE_H_
-
-#include <cmath>
-#include <vector>
-
-#include "tensorflow/core/framework/tensor.h"
-
-namespace tensorflow {
-namespace lattice {
-
-template <typename Dtype>
-Dtype ClipToBounds(const Dtype value, const Dtype lower_bound,
-                   const Dtype upper_bound) {
-  return value > upper_bound ? upper_bound
-                             : (value < lower_bound ? lower_bound : value);
-}
-
-// BottomCornerIndexAndResidual contains a bottom corner index in the
-// multi-cell lattice and the residual vector for a given input. If
-// out_of_bound[k] is true, then the kth input is outside of the multi-cell
-// lattice's boundary.
-template <typename Dtype>
-struct BottomCornerIndexAndResidual {
-  int64 bottom_corner_index;
-  std::vector<Dtype> residual;
-  std::vector<bool> out_of_bound;
-};
-
-// This class represents the structure of a multi-cell lattice, including the
-// dimension of the lattice, the number of vertices, the number of vertices in
-// each cell, and the strides used for computing a global index.
-// For example, in the 2d case, a multi-cell lattice is a grid. The following
-// example shows a 3 x 2 multi-cell lattice. Each cell has four vertices, and
-// in total this multi-cell lattice contains 12 vertices.
-//
-//  --------------------------
-//  |       |        |       |
-//  |       |        |       |
-//  --------------------------
-//  |       |        |       |
-//  |       |        |       |
-//  --------------------------
-//
-// With column-major indexing, the lattice with lattice_sizes
-// [m_0, m_1, ..., m_{n - 1}] will have:
-//   dimension: n
-//   number of vertices: m_0 * ... * m_{n-1}
-//   number of vertices in each cell: 2 ** n
-//   stride[0] = 1
-//   stride[1] = 1 * m_0
-//   ...
-//   stride[n-1] = 1 * m_{n - 2} * ... * m_0
-//
-// Moreover, the GetBottomCornerIndexAndResidual method returns the bottom
-// corner index and the residual vector of an input vector in a multi-cell
-// lattice.
-class LatticeStructure {
- public:
-  // lattice_sizes[ii] is expected to contain the lattice size of the iith
-  // coordinate.
-  explicit LatticeStructure(const std::vector<int>& lattice_sizes);
-
-  // Returns true if all elements are >= 2.
-  static bool IsValidLatticeSizes(const std::vector<int>& lattice_sizes);
-
-  const int64 Dimension() const { return dimension_; }
-  const int64 NumVertices() const { return num_vertices_; }
-  const int64 NumVerticesPerCell() const { return num_vertices_per_cell_; }
-
-  int64 Stride(int64 dimension) const { return strides_[dimension]; }
-  const std::vector<int64>& Strides() const { return strides_; }
-  int LatticeSize(int64 dimension) const { return lattice_sizes_[dimension]; }
-  const std::vector<int>& LatticeSizes() const { return lattice_sizes_; }
-
-  // Returns the bottom corner index of the cell that input_vec belongs to,
-  // and the residual vector, which is input_vec minus the vector
-  // corresponding to the bottom corner index.
-  // For example, consider the following 5 x 3 lattice in the 2d plane.
-  //
-  //   x2
-  //   |
-  //   |
-  //  10 ---- 11 ---- 12 ---- 13 ---- 14
-  //   |       |       |  x   |       |
-  //   |       |       |      |       |
-  //   5 ----- 6 ----- 7 ----- 8 ----- 9
-  //   |       |       |      |       |
-  //   |       |       |      |       |
-  //   0 ----- 1 ----- 2 ----- 3 ----- 4 ---- x1
-  //
-  // where the number at each vertex is the global index of that vertex. Each
-  // cell is a square with width 1, so the coordinate representation of the
-  // 0-indexed vertex is (0, 0), of the 1-indexed vertex is (1, 0), and of
-  // the 14-indexed vertex is (4, 2).
-  // Let x be the input vector, located at (2.5, 1.8). In this case, the
-  // cell's bottom corner index is 7, and the residual is (0.5, 0.8).
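A small standalone sketch of the stride and bottom-corner arithmetic documented above, replaying the 5 x 3 example (illustrative only; it mirrors the constructor in lattice_structure.cc and the method declared just below, and it skips the out-of-bound clipping the real code performs):

    #include <cmath>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
      const std::vector<int> lattice_sizes = {5, 3};  // 5 x 3 lattice.
      const std::vector<double> input = {2.5, 1.8};   // The point "x" above.

      // Column-major strides: {1, 5}.
      std::vector<long long> strides(lattice_sizes.size());
      long long num_vertices = 1;
      for (std::size_t ii = 0; ii < lattice_sizes.size(); ++ii) {
        strides[ii] = num_vertices;
        num_vertices *= lattice_sizes[ii];
      }

      // Bottom corner: floor(2.5) * 1 + floor(1.8) * 5 = 2 + 5 = 7.
      long long bottom_corner = 0;
      std::vector<double> residual(input.size());
      for (std::size_t ii = 0; ii < input.size(); ++ii) {
        const long long corner = static_cast<long long>(std::floor(input[ii]));
        bottom_corner += strides[ii] * corner;
        residual[ii] = input[ii] - corner;
      }

      // Prints "7 (0.5, 0.8)", matching the example in the comment above.
      std::cout << bottom_corner << " (" << residual[0] << ", " << residual[1]
                << ")\n";
    }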
-  template <typename Dtype>
-  BottomCornerIndexAndResidual<Dtype> GetBottomCornerIndexAndResidual(
-      typename TTypes<Dtype>::UnalignedConstFlat input_vec) const;
-
- private:
-  int64 dimension_;
-  int64 num_vertices_;
-  int64 num_vertices_per_cell_;
-  std::vector<int> lattice_sizes_;
-  std::vector<int64> strides_;
-};
-
-template <typename Dtype>
-BottomCornerIndexAndResidual<Dtype>
-LatticeStructure::GetBottomCornerIndexAndResidual(
-    typename TTypes<Dtype>::UnalignedConstFlat vec) const {
-  BottomCornerIndexAndResidual<Dtype> bottom_corner_index_and_residual;
-  int64& bottom_corner_index =
-      bottom_corner_index_and_residual.bottom_corner_index;
-  std::vector<Dtype>& residual = bottom_corner_index_and_residual.residual;
-  std::vector<bool>& out_of_bound =
-      bottom_corner_index_and_residual.out_of_bound;
-
-  residual.resize(dimension_);
-  out_of_bound.resize(dimension_);
-
-  bottom_corner_index = 0;
-  for (int64 ii = 0; ii < dimension_; ++ii) {
-    const int64 max_vertex_in_ii = lattice_sizes_[ii] - 1;
-    const Dtype input_ii = vec(ii);
-    // Find the bottom corner lattice coordinates for the "ii"th feature of
-    // this point. We clip to the bounds of the lattice, [0, max_vertex_in_ii].
-
-    const int64 bottom_corner_ii = ClipToBounds<int64>(
-        static_cast<int64>(std::floor(input_ii)), 0, max_vertex_in_ii - 1);
-    const Dtype residual_ii =
-        ClipToBounds<Dtype>(input_ii - bottom_corner_ii, 0.0, 1.0);
-
-    bottom_corner_index += strides_[ii] * bottom_corner_ii;
-    residual[ii] = residual_ii;
-    out_of_bound[ii] = (input_ii < 0.0 || input_ii > max_vertex_in_ii);
-  }
-  return bottom_corner_index_and_residual;
-}
-
-}  // namespace lattice
-}  // namespace tensorflow
-
-#endif  // TENSORFLOW_LATTICE_CC_LIB_LATTICE_STRUCTURE_H_
diff --git a/tensorflow_lattice/cc/ops/hypercube_interpolation_ops_test.cc b/tensorflow_lattice/cc/ops/hypercube_interpolation_ops_test.cc
deleted file mode 100644
index 49ad861..0000000
--- a/tensorflow_lattice/cc/ops/hypercube_interpolation_ops_test.cc
+++ /dev/null
@@ -1,87 +0,0 @@
-/* Copyright 2017 The TensorFlow Lattice Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#define EIGEN_USE_THREADS
-
-#include "tensorflow_lattice/cc/ops/hypercube_interpolation_ops_test.h"
-#include "tensorflow/core/framework/fake_input.h"
-#include "tensorflow/core/framework/node_def_builder.h"
-#include "tensorflow/core/framework/shape_inference_testutil.h"
-#include "tensorflow/core/lib/core/status_test_util.h"
-
-namespace tensorflow {
-namespace lattice {
-namespace {
-
-INSTANTIATE_TEST_CASE_P(HypercubeInterpolationOpsTestCPU,
-                        HypercubeInterpolationOpsTest,
-                        ::testing::Values([](OpsTestBase*) {}));
-
-TEST(HypercubeInterpolationOpsShapeTest, HypercubeInterpolation_ShapeFn) {
-  ShapeInferenceTestOp op("HypercubeInterpolation");
-
-  // Total number of weights = 3 x 2 x 3 = 18.
-  // Output dimension is always 18.
- std::vector lattice_sizes = {3, 2, 3}; - TF_ASSERT_OK(NodeDefBuilder("test", "HypercubeInterpolation") - .Input(FakeInput(DT_FLOAT)) - .Attr("lattice_sizes", lattice_sizes) - .Finalize(&op.node_def)); - - INFER_OK(op, "[10,3]", "[d0_0,18]"); - INFER_OK(op, "[?,3]", "[d0_0,18]"); - - INFER_ERROR("", op, "[?,?]"); - INFER_ERROR("Shape must be rank 2 but is rank 3", op, "[?,?,1]"); - INFER_ERROR("Shape must be rank 2 but is rank 1", op, "[10]"); - INFER_ERROR("Dimension must be 3 but is 2", op, "[?,2]"); - INFER_ERROR("Dimension must be 3 but is 2", op, "[5,2]"); -} - -TEST(HypercubeGradientOpsShapeTest, HypercubeGradient_ShapeFn) { - ShapeInferenceTestOp op("HypercubeGradient"); - - // Total number of weights = 3 x 2 x 3 = 18. - // Output dimension is always 18. - std::vector lattice_sizes = {3, 2, 3}; - TF_ASSERT_OK(NodeDefBuilder("test", "HypercubeGradient") - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Attr("lattice_sizes", lattice_sizes) - .Finalize(&op.node_def)); - - INFER_OK(op, "[10,3];[10,18];[10,18]", "[d0_0,d0_1]"); - INFER_OK(op, "[?,3];[?,18];[?,18]", "[d0_0,d0_1]"); - - INFER_ERROR("", op, "[?,?]"); - INFER_ERROR("Shape must be rank 2 but is rank 3", op, "[?,?,1];[?,1];[?,1]"); - INFER_ERROR("Shape must be rank 2 but is rank 1", op, "[10];[?,?,1];[?,?,1]"); - INFER_ERROR("Dimension must be 3 but is 2", op, "[?,2];[2,3];[2,3]"); - INFER_ERROR("Dimension must be 3 but is 2", op, "[5,2];[5,5];[5,5]"); - INFER_ERROR("Shape must be rank 2 but is rank 3", op, "[2,3];[?,1,3];[?,1]"); - INFER_ERROR("Shape must be rank 2 but is rank 1", op, "[2,3];[10];[10]"); - INFER_ERROR("Input batch size (2) != Weight batch size (5)", op, - "[2,3];[5,18];[5,18]"); - INFER_ERROR("Weight shape ([2,18]) != GradWrtWeight shape ([5,18])", op, - "[2,3];[2,18];[5,18]"); - INFER_ERROR("Weight shape ([2,18]) != GradWrtWeight shape ([2,15])", op, - "[2,3];[2,18];[2,15]"); - INFER_ERROR("Dimension must be 18 but is 17", op, "[?,3];[?,17];[?,17]"); - INFER_ERROR("Dimension must be 18 but is 5", op, "[5,3];[5,5];[5,5]"); -} - -} // namespace -} // namespace lattice -} // namespace tensorflow diff --git a/tensorflow_lattice/cc/ops/hypercube_interpolation_ops_test.h b/tensorflow_lattice/cc/ops/hypercube_interpolation_ops_test.h deleted file mode 100644 index fa76b67..0000000 --- a/tensorflow_lattice/cc/ops/hypercube_interpolation_ops_test.h +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright 2017 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LATTICE_CC_OPS_LATTICE_OPS_TEST_H_ -#define TENSORFLOW_LATTICE_CC_OPS_LATTICE_OPS_TEST_H_ - -#include - -#include "tensorflow/core/kernels/ops_testutil.h" - -namespace tensorflow { -namespace lattice { - -class HypercubeInterpolationOpsTest - : public OpsTestBase, - public ::testing::WithParamInterface> { - protected: - void SetUp() override { GetParam()(this); } -}; - -} // namespace lattice -} // namespace tensorflow - -#endif // TENSORFLOW_LATTICE_CC_OPS_LATTICE_OPS_TEST_H_ diff --git a/tensorflow_lattice/cc/ops/hypercube_interpolation_ops_test_p.cc b/tensorflow_lattice/cc/ops/hypercube_interpolation_ops_test_p.cc deleted file mode 100644 index 79319d9..0000000 --- a/tensorflow_lattice/cc/ops/hypercube_interpolation_ops_test_p.cc +++ /dev/null @@ -1,93 +0,0 @@ -/* Copyright 2017 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#define EIGEN_USE_THREADS - -#include -#include - -#include "tensorflow_lattice/cc/ops/hypercube_interpolation_ops_test.h" -#include "tensorflow/core/framework/fake_input.h" -#include "tensorflow/core/framework/node_def_builder.h" -#include "tensorflow/core/framework/tensor.h" -#include "tensorflow/core/framework/tensor_shape.h" -#include "tensorflow/core/framework/tensor_testutil.h" -#include "tensorflow/core/framework/types.pb.h" -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow/core/lib/core/status_test_util.h" -#include "tensorflow/core/platform/logging.h" - -namespace tensorflow { -namespace lattice { -namespace { - -TEST_P(HypercubeInterpolationOpsTest, ThreeDoubleLattice) { - const std::vector lattice_sizes = {3}; - TF_ASSERT_OK( - NodeDefBuilder("hypercube_interpolation", "HypercubeInterpolation") - .Input(FakeInput(DT_DOUBLE)) - .Attr("lattice_sizes", lattice_sizes) - .Finalize(node_def())); - TF_ASSERT_OK(InitOp()); - // Input tensor = [[-1], [0], [0.2], [0.8], [1.0], [1.3], [2.0], [2.5]]. - AddInputFromArray(TensorShape({8, 1}), - {-1.0, 0.0, 0.2, 0.8, 1.0, 1.3, 2.0, 2.5}); - TF_ASSERT_OK(RunOpKernel()); - // expected weight = [[1, 0, 0], [1, 0, 0], [0.8, 0.2, 0], [0.2, 0.8, 0], - // [0, 1, 0], [0, 0.7, 0.3], [0, 0, 1.0], [0, 0, 1.0]]. 
- Tensor expected_weights(DT_DOUBLE, TensorShape({8, 3})); - test::FillValues( - &expected_weights, - {1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.8, 0.2, 0.0, 0.2, 0.8, 0.0, - 0.0, 1.0, 0.0, 0.0, 0.7, 0.3, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0}); - - LOG(INFO) << "Input: " << GetInput(0).SummarizeValue(8); - LOG(INFO) << "Expected weight: " << expected_weights.SummarizeValue(24); - LOG(INFO) << "Result: " << GetOutput(0)->SummarizeValue(24); - test::ExpectTensorEqual(expected_weights, *GetOutput(0)); -} - -TEST_P(HypercubeInterpolationOpsTest, TwoByTwoFloatLattice) { - const std::vector lattice_sizes = {2, 2}; - TF_ASSERT_OK( - NodeDefBuilder("hypercube_interpolation", "HypercubeInterpolation") - .Input(FakeInput(DT_FLOAT)) - .Attr("lattice_sizes", lattice_sizes) - .Finalize(node_def())); - TF_ASSERT_OK(InitOp()); - // Input tensor = [[0, 0], [0, 1], [1, 0], [1, 1], [0.5, 0.5], [0.2, 0.8], - // [0.2, 0.3]] - AddInputFromArray( - TensorShape({7, 2}), - {0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.5, 0.5, 0.2, 0.8, 0.2, 0.3}); - TF_ASSERT_OK(RunOpKernel()); - // expected weight = [[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1], - // [0.25, 0.25, 0.25, 0.25], [0.16, 0.04, 0.64, 0.16], [0.56, 0.14, 0.24, - // 0.06]] - Tensor expected_weights(DT_FLOAT, TensorShape({7, 4})); - test::FillValues( - &expected_weights, - {1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, - 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.25, 0.25, 0.25, 0.25, - 0.16, 0.04, 0.64, 0.16, 0.56, 0.14, 0.24, 0.06}); - - LOG(INFO) << "Input: " << GetInput(0).SummarizeValue(14); - LOG(INFO) << "Expected weight: " << expected_weights.SummarizeValue(28); - LOG(INFO) << "Result: " << GetOutput(0)->SummarizeValue(28); - test::ExpectTensorEqual(expected_weights, *GetOutput(0)); -} - -} // namespace -} // namespace lattice -} // namespace tensorflow diff --git a/tensorflow_lattice/cc/ops/lattice_interpolation_ops.cc b/tensorflow_lattice/cc/ops/lattice_interpolation_ops.cc deleted file mode 100644 index dacc620..0000000 --- a/tensorflow_lattice/cc/ops/lattice_interpolation_ops.cc +++ /dev/null @@ -1,184 +0,0 @@ -/* Copyright 2017 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include - -#include "tensorflow_lattice/cc/lib/lattice_structure.h" -#include "tensorflow/core/framework/op.h" -#include "tensorflow/core/framework/shape_inference.h" -#include "tensorflow/core/lib/strings/str_util.h" -#include "tensorflow/core/lib/strings/strcat.h" - -namespace tensorflow { -namespace lattice { - -namespace { -Status InterpolationShapeFn(shape_inference::InferenceContext* c) { - std::vector lattice_sizes; - TF_RETURN_IF_ERROR(c->GetAttr("lattice_sizes", &lattice_sizes)); - if (!LatticeStructure::IsValidLatticeSizes(lattice_sizes)) { - return errors::InvalidArgument(str_util::Join(lattice_sizes, ","), - " is not a valid lattice sizes"); - } - - // input_shape = [?,lattice_sizes.size()]. 
- shape_inference::ShapeHandle input_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &input_shape)); - shape_inference::DimensionHandle unused_lattice_input_size; - TF_RETURN_IF_ERROR(c->WithValue(c->Dim(input_shape, 1), lattice_sizes.size(), - &unused_lattice_input_size)); - - shape_inference::DimensionHandle batch_size = c->Dim(input_shape, 0); - LatticeStructure lattice_structure(lattice_sizes); - c->set_output(0, c->Matrix(batch_size, lattice_structure.NumVertices())); - - return Status::OK(); -} - -Status GradWrtInputShapeFn(shape_inference::InferenceContext* c) { - std::vector lattice_sizes; - TF_RETURN_IF_ERROR(c->GetAttr("lattice_sizes", &lattice_sizes)); - if (!LatticeStructure::IsValidLatticeSizes(lattice_sizes)) { - return errors::InvalidArgument(str_util::Join(lattice_sizes, ","), - " is not a valid lattice sizes"); - } - LatticeStructure lattice_structure(lattice_sizes); - - // input_shape = [?,lattice_sizes.size()]. - shape_inference::ShapeHandle input_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &input_shape)); - shape_inference::DimensionHandle batch_size = c->Dim(input_shape, 0); - shape_inference::DimensionHandle input_size; - TF_RETURN_IF_ERROR( - c->WithValue(c->Dim(input_shape, 1), lattice_sizes.size(), &input_size)); - - // weight_shape = [?,LatticeStructure.NumVertcies()]. - shape_inference::ShapeHandle weight_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &weight_shape)); - if (c->Value(c->Dim(weight_shape, 0)) != c->Value(c->Dim(input_shape, 0))) { - return errors::InvalidArgument(strings::StrCat( - "Input batch size (", c->DebugString(c->Dim(input_shape, 0)), - ") != Weight batch size (", c->DebugString(c->Dim(weight_shape, 0)), - ")")); - } - shape_inference::DimensionHandle unused_weight_size; - TF_RETURN_IF_ERROR(c->WithValue(c->Dim(weight_shape, 1), - lattice_structure.NumVertices(), - &unused_weight_size)); - - // grad_wrt_weight_shape = [?,LatticeStructure.NumVertcies()]. - shape_inference::ShapeHandle grad_wrt_weight_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 2, &grad_wrt_weight_shape)); - if ((c->Value(c->Dim(weight_shape, 0)) != - c->Value(c->Dim(grad_wrt_weight_shape, 0))) || - (c->Value(c->Dim(weight_shape, 1)) != - c->Value(c->Dim(grad_wrt_weight_shape, 1)))) { - return errors::InvalidArgument( - strings::StrCat("Weight shape (", c->DebugString(weight_shape), - ") != GradWrtWeight shape (", - c->DebugString(grad_wrt_weight_shape), ")")); - } - - c->set_output(0, c->Matrix(batch_size, input_size)); - - return Status::OK(); -} -} // namespace - -REGISTER_OP("HypercubeInterpolation") - .Input("input: Dtype") - .Output("weights: Dtype") - .Attr("Dtype: {float, double} = DT_FLOAT") - .Attr("lattice_sizes: list(int) = []") - .SetShapeFn(InterpolationShapeFn) - .Doc(R"doc( -Returns a tensor representing interpolation weights in a hypercube lattice -interpolation. - -Inputs - input: 2D tensor, `[?, d]` - -Params - lattice_sizes: 1D int tensor that contains a lattice size per each dimension, - [m_0, ..., m_{d - 1}]. - -Outputs - weights: 2D tensor that contains interpolation weights. - [?, m_0 x m_1 ... x m_{d - 1}]. -)doc"); - -REGISTER_OP("HypercubeGradient") - .Input("input: Dtype") - .Input("weight: Dtype") - .Input("grad_wrt_weight: Dtype") - .Output("grad_wrt_input: Dtype") - .Attr("Dtype: {float, double} = DT_FLOAT") - .Attr("lattice_sizes: list(int) = []") - .SetShapeFn(GradWrtInputShapeFn) - .Doc(R"doc( -Computes gradients of HypercubeInterpolation. Returns a dense gradient. 
- -Inputs - input: input tensor, `[?, d]`. - grad_wrt_weight: Gradient with respect to the outputs of this operator, - `[?, m_0 x m_1 x .. x m_{d - 1}]` - -Outputs - grad_wrt_input: A gradient tensor, `[?, d]`, with respect to input. -)doc"); - -REGISTER_OP("SimplexInterpolation") - .Input("input: Dtype") - .Output("weights: Dtype") - .Attr("Dtype: {float, double} = DT_FLOAT") - .Attr("lattice_sizes: list(int) = []") - .SetShapeFn(InterpolationShapeFn) - .Doc(R"doc( -Returns a tensor representing interpolation weights in a simplex lattice -interpolation. - -Inputs - input: 2D tensor, `[?, d]` - -Params - lattice_sizes: 1D int tensor that contains a lattice size per each dimension, - [m_0, ..., m_{d - 1}]. - -Outputs - weights: 2D tensor that contains interpolation weights. - [?, m_0 x m_1 ... x m_{d - 1}]. -)doc"); - -REGISTER_OP("SimplexGradient") - .Input("input: Dtype") - .Input("weight: Dtype") - .Input("grad_wrt_weight: Dtype") - .Output("grad_wrt_input: Dtype") - .Attr("Dtype: {float, double} = DT_FLOAT") - .Attr("lattice_sizes: list(int) = []") - .SetShapeFn(GradWrtInputShapeFn) - .Doc(R"doc( -Computes gradients of SimplexInterpolation. Returns a dense gradient. - -Inputs - input: input tensor, `[?, d]`. - grad_wrt_weight: Gradient with respect to the outputs of this operator, - `[?, m_0 x m_1 x .. x m_{d - 1}]` - -Outputs - grad_wrt_input: A gradient tensor, `[?, d]`, with respect to input. -)doc"); - -} // namespace lattice -} // namespace tensorflow diff --git a/tensorflow_lattice/cc/ops/monotone_lattice_ops.cc b/tensorflow_lattice/cc/ops/monotone_lattice_ops.cc deleted file mode 100644 index 9db7124..0000000 --- a/tensorflow_lattice/cc/ops/monotone_lattice_ops.cc +++ /dev/null @@ -1,89 +0,0 @@ -/* Copyright 2017 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include - -#include "tensorflow_lattice/cc/lib/lattice_structure.h" -#include "tensorflow/core/framework/common_shape_fns.h" -#include "tensorflow/core/framework/op.h" -#include "tensorflow/core/framework/shape_inference.h" -#include "tensorflow/core/lib/strings/str_util.h" -#include "tensorflow/core/lib/strings/strcat.h" - -namespace tensorflow { -namespace lattice { - -REGISTER_OP("MonotoneLattice") - .Input("lattice_params: Dtype") - .Output("projected_lattice_params: Dtype") - .Attr("Dtype: {float, double} = DT_FLOAT") - .Attr("is_monotone: list(bool) = []") - .Attr("lattice_sizes: list(int) = []") - .Attr("tolerance: float = 1e-7") - .Attr("max_iter: int = 1000") - .SetShapeFn([](shape_inference::InferenceContext* c) { - // Check pre-conditions. 
-      std::vector<int> lattice_sizes;
-      TF_RETURN_IF_ERROR(c->GetAttr("lattice_sizes", &lattice_sizes));
-      if (!LatticeStructure::IsValidLatticeSizes(lattice_sizes)) {
-        return errors::InvalidArgument(str_util::Join(lattice_sizes, ","),
-                                       " is not a valid list of lattice sizes");
-      }
-      LatticeStructure lattice_structure(lattice_sizes);
-
-      shape_inference::ShapeHandle lattice_params_shape;
-      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &lattice_params_shape));
-      if (c->Value(c->Dim(lattice_params_shape, 1)) !=
-          lattice_structure.NumVertices()) {
-        return errors::InvalidArgument(
-            strings::StrCat("lattice_params' number of parameters (",
-                            c->DebugString(c->Dim(lattice_params_shape, 1)),
-                            ") != expected number of parameters (",
-                            lattice_structure.NumVertices(), ")"));
-      }
-      // Returns the shape of the output.
-      return shape_inference::UnchangedShapeWithRank(c, 2);
-    })
-    .Doc(R"doc(
-Returns lattice parameters projected onto the monotonicity constraints.
-
-Monotonicity constraints are specified by is_monotone. If is_monotone[k] ==
-True, then the kth input is constrained to be non-decreasing; otherwise no
-constraint is imposed on it.
-
-This operator uses an iterative algorithm, the Alternating Direction Method of
-Multipliers (ADMM), to find the projection, so tolerance and max_iter can be
-used to control the trade-off between accuracy and time spent in the ADMM
-method.
-
-Inputs
-  lattice_params: 2D tensor, `[number of outputs, number of parameters]`
-
-Params
-  is_monotone: 1D bool tensor that contains whether the kth dimension should
-    be monotonic.
-  lattice_sizes: 1D int tensor that contains the lattice size of each
-    dimension, [m_0, ..., m_{d - 1}].
-  tolerance: The tolerance in ||true projection - projection|| in the ADMM
-    method.
-  max_iter: Maximum number of iterations in the ADMM method.
-
-Outputs
-  projected_lattice_params: 2D tensor,
-    `[number of outputs, number of parameters]`, that contains the projected
-    parameters.
-)doc");
-
-}  // namespace lattice
-}  // namespace tensorflow
diff --git a/tensorflow_lattice/cc/ops/monotone_lattice_ops_test.cc b/tensorflow_lattice/cc/ops/monotone_lattice_ops_test.cc
deleted file mode 100644
index 8300169..0000000
--- a/tensorflow_lattice/cc/ops/monotone_lattice_ops_test.cc
+++ /dev/null
@@ -1,187 +0,0 @@
-/* Copyright 2017 The TensorFlow Lattice Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/ -#include -#include - -#include "tensorflow/core/framework/fake_input.h" -#include "tensorflow/core/framework/node_def_builder.h" -#include "tensorflow/core/framework/shape_inference_testutil.h" -#include "tensorflow/core/framework/tensor.h" -#include "tensorflow/core/framework/tensor_testutil.h" -#include "tensorflow/core/kernels/ops_testutil.h" -#include "tensorflow/core/lib/core/status_test_util.h" -#include "tensorflow/core/platform/logging.h" -#include "tensorflow/core/platform/test.h" - -namespace tensorflow { -namespace lattice { - -namespace { -class MonotoneLatticeOpsTest : public OpsTestBase { - protected: - MonotoneLatticeOpsTest() {} - // Computes the projected_lattice_param_vec and compares the output - // with expected_projected_lattice_param_vec. - // In order to test batch parameter projection, this test method accepts a - // list of lattice_param_vec and a list of - // expected_projected_lattice_param_vec. - void CheckProjection( - const std::vector& lattice_sizes, - const std::vector& is_monotone, - const std::vector>& lattice_param_vecs, - const std::vector>& - expected_projected_lattice_param_vecs) { - constexpr double kEpsilon = 1e-5; - const int num_inputs = lattice_param_vecs.size(); - ASSERT_GT(num_inputs, 0); - const int num_parameters = lattice_param_vecs[0].size(); - - // Pre-condition. - ASSERT_EQ(expected_projected_lattice_param_vecs.size(), num_inputs); - - // Flattening vectors to fill-in tensors. - std::vector flattened_lattice_param_vecs; - std::vector flattened_expected_projection; - flattened_lattice_param_vecs.reserve(num_inputs * num_parameters); - flattened_expected_projection.reserve(num_inputs * num_parameters); - for (int ii = 0; ii < num_inputs; ++ii) { - ASSERT_EQ(lattice_param_vecs[ii].size(), num_parameters); - ASSERT_EQ(expected_projected_lattice_param_vecs[ii].size(), - num_parameters); - for (int jj = 0; jj < num_parameters; ++jj) { - flattened_lattice_param_vecs.push_back(lattice_param_vecs[ii][jj]); - flattened_expected_projection.push_back( - expected_projected_lattice_param_vecs[ii][jj]); - } - } - - // Define tensorflow ops to be tested. 
- TF_ASSERT_OK(NodeDefBuilder("monotone_lattice", "MonotoneLattice") - .Input(FakeInput(DT_DOUBLE)) - .Attr("lattice_sizes", lattice_sizes) - .Attr("is_monotone", is_monotone) - .Finalize(node_def())); - - TF_ASSERT_OK(InitOp()); - AddInputFromArray(TensorShape({num_inputs, num_parameters}), - flattened_lattice_param_vecs); - TF_ASSERT_OK(RunOpKernel()); - Tensor expected_projection_tensor( - DT_DOUBLE, TensorShape({num_inputs, num_parameters})); - test::FillValues(&expected_projection_tensor, - flattened_expected_projection); - - VLOG(1) << "Lattice parameter tensor: " - << GetInput(0).SummarizeValue(num_parameters); - VLOG(1) << "Expected projection tensor: " - << expected_projection_tensor.SummarizeValue(num_parameters); - VLOG(1) << "Result tensor: " - << GetOutput(0)->SummarizeValue(num_parameters); - test::ExpectTensorNear(expected_projection_tensor, *GetOutput(0), - kEpsilon); - } -}; - -TEST_F(MonotoneLatticeOpsTest, ProjectToNothing) { - CheckProjection( - /*lattice_sizes=*/{2, 2}, /*is_monotone=*/{false, false}, - /*lattice_param_vecs=*/{{3.0, 0.0, 2.0, 5.0}}, - /*expected_projected_lattice_param_vecs=*/{{3.0, 0.0, 2.0, 5.0}}); -} - -TEST_F(MonotoneLatticeOpsTest, ProjectTo0thDimension) { - CheckProjection( - /*lattice_sizes=*/{2, 2}, /*is_monotone=*/{true, false}, - /*lattice_param_vecs=*/{{3.0, 0.0, 2.0, 5.0}}, - /*expected_projected_lattice_param_vecs=*/{{1.5, 1.5, 2.0, 5.0}}); -} - -TEST_F(MonotoneLatticeOpsTest, ProjectTo1stDimension) { - CheckProjection( - /*lattice_sizes=*/{2, 2}, /*is_monotone=*/{false, true}, - /*lattice_param_vecs=*/{{3.0, 0.0, 2.0, 5.0}}, - /*expected_projected_lattice_param_vecs=*/{{2.5, 0.0, 2.5, 5.0}}); -} - -TEST_F(MonotoneLatticeOpsTest, ProjectToAllDimensions) { - CheckProjection( - /*lattice_sizes=*/{2, 2}, /*is_monotone=*/{true, true}, - /*lattice_param_vecs=*/{{3.0, 0.0, 2.0, 5.0}}, - /*expected_projected_lattice_param_vecs=*/{{1.5, 1.5, 2.0, 5.0}}); -} - -TEST_F(MonotoneLatticeOpsTest, ProjectThreeByTwoLatticeToAllDimensions) { - CheckProjection( - /*lattice_sizes=*/{3, 2}, /*is_monotone=*/{true, true}, - /*lattice_param_vecs=*/{{3.0, 1.0, 0.0, 0.0, 2.0, 5.0}}, - /*expected_projected_lattice_param_vecs=*/{ - {1.0, 1.0, 1.0, 1.0, 2.0, 5.0}}); -} - -TEST_F(MonotoneLatticeOpsTest, ProjectMultipleTwoByTwoLatticesToAllDimensions) { - CheckProjection( - /*lattice_sizes=*/{2, 2}, /*is_monotone=*/{true, true}, - /*lattice_param_vecs=*/{{3.0, 0.0, 2.0, 5.0}, - {3.0, 0.0, 2.0, 5.0}, - {0.0, 1.0, 2.0, 3.0}, - {3.0, 3.0, 1.0, 1.0}, - {-1.0, -5.0, 2.0, 3.0}}, - /*expected_projected_lattice_param_vecs=*/{{1.5, 1.5, 2.0, 5.0}, - {1.5, 1.5, 2.0, 5.0}, - {0.0, 1.0, 2.0, 3.0}, - {2.0, 2.0, 2.0, 2.0}, - {-3.0, -3.0, 2.0, 3.0}}); -} - -TEST(MonotoneLatticeOpsShapeTest, CorrectInference) { - ShapeInferenceTestOp op("MonotoneLattice"); - - // 2 x 2 x 2 lattice = 8 parameters. - std::vector lattice_sizes = {2, 2, 2}; - std::vector is_monotone = {true, true, true}; - TF_ASSERT_OK(NodeDefBuilder("test", "MonotoneLattice") - .Input(FakeInput(DT_FLOAT)) - .Attr("lattice_sizes", lattice_sizes) - .Attr("is_monotone", is_monotone) - .Finalize(&op.node_def)); - - INFER_OK(op, "[3,8]", "in0"); - INFER_OK(op, "[10,8]", "in0"); -} - -TEST(MonotoneLatticeOpsShapeTest, WrongShapeShouldFail) { - ShapeInferenceTestOp op("MonotoneLattice"); - - // 2 x 2 x 2 lattice = 8 parameters. 
-  std::vector<int64> lattice_sizes = {2, 2, 2};
-  std::vector<bool> is_monotone = {true, true, true};
-  TF_ASSERT_OK(NodeDefBuilder("test", "MonotoneLattice")
-                   .Input(FakeInput(DT_FLOAT))
-                   .Attr("lattice_sizes", lattice_sizes)
-                   .Attr("is_monotone", is_monotone)
-                   .Finalize(&op.node_def));
-
-  INFER_ERROR("Shape must be rank 2 but is rank 1", op, "[1]");
-  INFER_ERROR("Shape must be rank 2 but is rank 3", op, "[1,2,3]");
-  INFER_ERROR(
-      "lattice_params' number of parameters (3) != expected number of "
-      "parameters (8)",
-      op, "[10,3]");
-}
-
-}  // namespace
-
-}  // namespace lattice
-}  // namespace tensorflow
diff --git a/tensorflow_lattice/cc/ops/monotonic_projection_op.cc b/tensorflow_lattice/cc/ops/monotonic_projection_op.cc
deleted file mode 100644
index 07614c4..0000000
--- a/tensorflow_lattice/cc/ops/monotonic_projection_op.cc
+++ /dev/null
@@ -1,62 +0,0 @@
-/* Copyright 2017 The TensorFlow Lattice Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#include
-#include
-#include
-#include
-
-#include "tensorflow/core/framework/common_shape_fns.h"
-#include "tensorflow/core/framework/op.h"
-#include "tensorflow/core/framework/shape_inference.h"
-
-namespace tensorflow {
-namespace lattice {
-
-REGISTER_OP("MonotonicProjection")
-    .Input("values: Dtype")
-    .Input("increasing: bool")
-    .Output("monotonic: Dtype")
-    .Attr("Dtype: {float, double} = DT_FLOAT")
-    .SetShapeFn([](shape_inference::InferenceContext* c) {
-      // Input must be a vector, and output is the same shape as input.
-      shape_inference::ShapeHandle values_shape;
-      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &values_shape));
-      shape_inference::ShapeHandle increasing_shape;
-      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &increasing_shape));
-
-      c->set_output(0, values_shape);
-      return Status::OK();
-    })
-    .Doc(R"doc(
-Returns a non-strict monotonic projection of the vector.
-
-The returned vector is of the same size as the input, with values (optionally)
-changed to make them monotonic while minimizing the sum of the squared
-distances to the original values.
-
-This is part of the set of ops that support monotonicity in piecewise-linear
-calibration.
-
-Note that the gradient is undefined for this function.
-
-  values: `Tensor` with values to be made monotonic.
-  increasing: Defines whether the projection is to monotonically increasing
-    or to monotonically decreasing values.
-
-  monotonic: output `Tensor` with values made monotonic.
-)doc");
-
-}  // namespace lattice
-}  // namespace tensorflow
diff --git a/tensorflow_lattice/cc/ops/monotonic_projection_op_test.cc b/tensorflow_lattice/cc/ops/monotonic_projection_op_test.cc
deleted file mode 100644
index 6945c65..0000000
--- a/tensorflow_lattice/cc/ops/monotonic_projection_op_test.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-/* Copyright 2017 The TensorFlow Lattice Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include - -#include "tensorflow/core/framework/fake_input.h" -#include "tensorflow/core/framework/node_def_builder.h" -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/shape_inference_testutil.h" -#include "tensorflow/core/framework/tensor.h" -#include "tensorflow/core/framework/tensor_testutil.h" -#include "tensorflow/core/kernels/ops_testutil.h" -#include "tensorflow/core/lib/core/status_test_util.h" -#include "tensorflow/core/lib/gtl/array_slice.h" -#include "tensorflow/core/platform/logging.h" - -namespace tensorflow { -namespace lattice { - -class MonotonicProjectionOpTest : public OpsTestBase {}; - -TEST_F(MonotonicProjectionOpTest, MonotonicProjection) { - struct Test { - bool increasing; - gtl::ArraySlice before; - gtl::ArraySlice expected; - }; - std::vector tests{ - // No-op. - {true, {}, {}}, - {true, {0, 1}, {0, 1}}, - {false, {1, 0}, {1, 0}}, - {true, {22.9}, {22.9}}, - {false, {22.9}, {22.9}}, - {true, {6.0, 7.0, 8.0}, {6.0, 7.0, 8.0}}, - - // Short dependency. - {true, {1, 0}, {0.5, 0.5}}, - {false, {0, 1}, {0.5, 0.5}}, - - // Long dependencies. - {true, {6.0, 1, 2, 3.5}, {3, 3, 3, 3.5}}, - {true, {10.0, 9.0, 8.0, 7.0, 6.0}, {8.0, 8.0, 8.0, 8.0, 8.0}}, - - // Examples that require back-tracking of pools. - {false, {2, 1, 6}, {3, 3, 3}}, - {true, {4, 5, 0}, {3, 3, 3}}, - {true, {4, 5, 0, 4, -3}, {2, 2, 2, 2, 2}}, - {true, {5.0, 6.0, 5.0, 6.0, 7.0, 6.0}, {5.0, 5.5, 5.5, 6.0, 6.5, 6.5}}, - }; - - for (const auto &test : tests) { - inputs_.clear(); - const int64 test_size = test.before.size(); - LOG(INFO) << "Testing for increasing=" << test.increasing << ", values=[" - << ::tensorflow::str_util::Join(test.before, ", ") << "]"; - - TF_ASSERT_OK(NodeDefBuilder("monotonic_projection:0", "MonotonicProjection") - .Input("values", 0, DT_DOUBLE) - .Input("increasing", 0, DT_BOOL) - .Finalize(node_def())); - TF_ASSERT_OK(InitOp()); - AddInputFromArray(TensorShape({test_size}), test.before); - AddInputFromList(TensorShape(), {test.increasing}); - TF_ASSERT_OK(RunOpKernel()); - - Tensor expected(allocator(), DT_DOUBLE, TensorShape({test_size})); - test::FillValues(&expected, test.expected); - test::ExpectTensorEqual(expected, *GetOutput(0)); - } -} - -TEST_F(MonotonicProjectionOpTest, MonotonicProjection_ShapeFn) { - ShapeInferenceTestOp op("MonotonicProjection"); - TF_ASSERT_OK(NodeDefBuilder("monotonic_projection:1", "MonotonicProjection") - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_BOOL)) - .Finalize(&op.node_def)); - - INFER_OK(op, "[11];[]", "in0"); - INFER_OK(op, "[17];[]", "in0"); - - INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[20,1];[]"); - INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[20];[1]"); -} - -} // namespace lattice -} // namespace tensorflow diff --git a/tensorflow_lattice/cc/ops/pwl_indexing_calibrator_ops.cc b/tensorflow_lattice/cc/ops/pwl_indexing_calibrator_ops.cc deleted file mode 100644 index 1634475..0000000 --- a/tensorflow_lattice/cc/ops/pwl_indexing_calibrator_ops.cc +++ /dev/null @@ -1,190 +0,0 @@ -/* Copyright 2017 
The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -// Implementations of the piecewise linear "Indexing" calibrator: operators -// related to the calculation of the interpolation weights and gradients. -// -// Sparse and dense implementations. -// -// FutureWork: Zero tensors using functor::SetZeroFunctor (device dependent), -#include -#include -#include -#include - -#include "tensorflow/core/framework/common_shape_fns.h" -#include "tensorflow/core/framework/op.h" -#include "tensorflow/core/framework/shape_inference.h" -#include "tensorflow/core/lib/core/errors.h" - -namespace tensorflow { -namespace lattice { - -namespace { - -bool IsSameShape(shape_inference::InferenceContext* c, - const shape_inference::ShapeHandle& shape1, - const shape_inference::ShapeHandle& shape2) { - if (c->Rank(shape1) != c->Rank(shape2)) return false; - for (int ii = 0; ii < c->Rank(shape1); ++ii) { - if (c->Value(c->Dim(shape1, ii)) != c->Value(c->Dim(shape2, ii))) { - return false; - } - } - return true; -} - -} // namespace - -REGISTER_OP("PwlIndexingCalibrator") - .Input("input: Dtype") - .Input("kp_inputs: Dtype") - .Output("weights: Dtype") - .Attr("Dtype: {float, double} = DT_FLOAT") - .SetShapeFn([](shape_inference::InferenceContext* c) { - shape_inference::ShapeHandle input_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &input_shape)); - shape_inference::DimensionHandle batch_size = c->Dim(input_shape, 0); - shape_inference::ShapeHandle kp_input_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &kp_input_shape)); - shape_inference::DimensionHandle num_keypoints = - c->Dim(kp_input_shape, 0); - auto output_shape = c->Matrix(batch_size, num_keypoints); - c->set_output(0, output_shape); - return Status::OK(); - }) - .Doc(R"doc( -Returns tensor representing interpolation weights in a piecewise linear -function. If using a large number of keypoints, try PwlIndexingCalibratorSparse. - -Notice that in this version the keypoints inputs (given by kp_inputs) is kept -fixed by forcing its gradient to be always 0. FutureWork: allow kp_inputs to -also be optimized, by providing a gradient. - -Inputs - input: uncalibrated weights, `[batch_size]` - kp_input: keypoints' input weights, can be initialized with the - pwl_calibrator_initialize_input_keypoints op. `[num_keypoints]` - -Outputs - weights: Interpolation weights for a piecewise linear function. Its shape is - `[batch_size, num_keypoints]`. The dot product of this and the keypoints - output will give the calibrated value. 
-)doc"); - -REGISTER_OP("PwlIndexingCalibratorGradient") - .Input("input: Dtype") - .Input("kp_inputs: Dtype") - .Input("grad_wrt_weights: Dtype") - .Output("grad_wrt_input: Dtype") - .Output("grad_wrt_kp_inputs: Dtype") - .Attr("Dtype: {float, double} = DT_FLOAT") - .SetShapeFn([](shape_inference::InferenceContext* c) { - shape_inference::ShapeHandle input_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &input_shape)); - shape_inference::DimensionHandle batch_size = c->Dim(input_shape, 0); - - shape_inference::ShapeHandle kp_input_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &kp_input_shape)); - const auto num_keypoints = c->Dim(kp_input_shape, 0); - - auto weights_shape = c->Matrix(batch_size, num_keypoints); - shape_inference::ShapeHandle grad_wrt_weights_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 2, &grad_wrt_weights_shape)); - if (!IsSameShape(c, weights_shape, grad_wrt_weights_shape)) { - return errors::InvalidArgument("grad_wrt_weights has shape ", - c->DebugString(grad_wrt_weights_shape), - ", but weights has shape ", - c->DebugString(weights_shape)); - } - - auto grad_wrt_input_shape = c->Vector(batch_size); - c->set_output(0, grad_wrt_input_shape); - auto grad_wrt_kp_inputs_shape = c->Vector(num_keypoints); - c->set_output(1, grad_wrt_kp_inputs_shape); - return Status::OK(); - }) - .Doc(R"doc( -Computes gradients of PwlIndexingCalibrator. Returns a dense gradient. - -As FutureWork we want to allow kp_inputs to be adjusted dynamically. - -Inputs - input: uncalibrated value, `[batch_size]`. - kp_inputs: keypoints' input weights, can be initialized with the - pwl_calibrator_initialize_input_keypoints op, `[num_keypoints]`. - weights_grad: Gradient with respect to the weights outputs of this operator, - `[batch_size, num_keypoints]`. - -Outputs - grad_wrt_input: gradient with respect to input, `[batch_size]`. - grad_wrt_kp_inputs: gradient with respect to the kp_inputs. This is fixed in 0 - because (for now) the keypoints inputs are fixed, `[num_keypoints]`. - -)doc"); - -REGISTER_OP("PwlIndexingCalibratorSparse") - .Input("input: Dtype") - .Input("kp_inputs: Dtype") - .Output("indices: int64") - .Output("weights: Dtype") - .Attr("Dtype: {float, double} = DT_FLOAT") - .Doc(R"doc( -Returns sparse tensor representing interpolation weights in a piecewise linear -function. - -Inputs - input: uncalibrated weights, `[batch_size]` - kp_input: keypoints' input weights, can be initialized with the - pwl_calibrator_initialize_input_keypoints op. `[num_keypoints]` - -Outputs - indices, weights: Tensors with sparse representation of interpolation weights - for a piecewise linear function in the form of a SparseTensor. At most two - weights will be set per uncalibrated value given. This can be multiplied - by the keypoints' output weights. The tensor will be shaped - `[batch_size, num_keypoints]`. -)doc"); - -REGISTER_OP("PwlIndexingCalibratorSparseGradient") - .Input("input: Dtype") - .Input("kp_inputs: Dtype") - .Input("indices: int64") - .Input("grad_wrt_weights: Dtype") - .Output("grad_wrt_input: Dtype") - .Output("grad_wrt_kp_inputs: Dtype") - .Attr("Dtype: {float, double} = DT_FLOAT") - .Doc(R"doc( -Computes gradients of PwlIndexingCalibratorSparse. Returns (dense) gradients -with respect to the input and to the kp_inputs. - -As FutureWork we want to allow kp_inputs to be adjusted dynamically. - -Inputs - input: uncalibrated value, `[batch_size]`. 
- kp_inputs: keypoints' input weights, can be initialized with the - pwl_calibrator_initialize_input_keypoints op, `[num_keypoints]`. - indices, weights_grad: indices and weights gradient (gradient - of the loss function with respect to output weights calculated by - PwlIndexingCalibratorSparseOp). They are the sparse representation of a - Tensor of shape `[batch_size, num_keypoints]`. - -Outputs - grad_wrt_input: gradient with respect to input, `[batch_size]`. - grad_wrt_kp_inputs: gradient with respect to the kp_inputs. This is fixed in 0 - because (for now) the keypoints inputs are fixed, `[num_keypoints]`. -)doc"); - -} // namespace lattice -} // namespace tensorflow diff --git a/tensorflow_lattice/cc/ops/pwl_indexing_calibrator_ops_test.cc b/tensorflow_lattice/cc/ops/pwl_indexing_calibrator_ops_test.cc deleted file mode 100644 index 792bc6f..0000000 --- a/tensorflow_lattice/cc/ops/pwl_indexing_calibrator_ops_test.cc +++ /dev/null @@ -1,480 +0,0 @@ -/* Copyright 2017 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include - -#include "tensorflow/core/framework/fake_input.h" -#include "tensorflow/core/framework/node_def_builder.h" -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/shape_inference_testutil.h" -#include "tensorflow/core/framework/tensor.h" -#include "tensorflow/core/framework/tensor_testutil.h" -#include "tensorflow/core/kernels/ops_testutil.h" -#include "tensorflow/core/lib/core/status_test_util.h" -#include "tensorflow/core/lib/gtl/array_slice.h" -#include "tensorflow/core/lib/strings/str_util.h" -#include "tensorflow/core/platform/logging.h" - -namespace tensorflow { -namespace lattice { - -using ::tensorflow::gtl::ArraySlice; - -extern void PwlSetTestMode(bool split_batch); - -class PwlIndexingCalibratorOpTest : public OpsTestBase { - protected: - void PwlIndexingCalibratorHelper(const bool use_sparse) { - ArraySlice keypoints_inputs{0.0, 20.0, 40.0, 60.0, 80.0, 100.0}; - const int num_keypoints = keypoints_inputs.size(); - - struct Test { - ArraySlice uncalibrated; - ArraySlice expected_weights; - ArraySlice expected_indices; - }; - std::vector tests{ - // Bounded min. - {{-10.0}, {1.0, 0.0}, {0, 1}}, - - // Bounded max. - {{200.0}, {0.0, 1.0}, {4, 5}}, - - // Exact match. - {{80.0}, {0.0, 1.0, 0.0}, {3, 4, 5}}, - - // Interpolated examples. - {{10.0}, {0.5, 0.5}, {0, 1}}, - {{35.0}, {0.25, 0.75}, {1, 2}}, - }; - - LOG(INFO) << "Keypoints inputs: " - << "[" << str_util::Join(keypoints_inputs, ",") << "]"; - for (const auto &test : tests) { - inputs_.clear(); - TF_ASSERT_OK(NodeDefBuilder("pwl_indexing_calibrator:0", - use_sparse ? 
"PwlIndexingCalibratorSparse" - : "PwlIndexingCalibrator") - .Input(FakeInput(DT_DOUBLE)) // - .Input(FakeInput(DT_DOUBLE)) - .Finalize(node_def())); - TF_ASSERT_OK(InitOp()); - int batch_size = test.uncalibrated.size(); - AddInputFromArray(TensorShape({batch_size}), test.uncalibrated); - AddInputFromArray(TensorShape({6}), keypoints_inputs); - LOG(INFO) << "Testing for uncalibrated=" - << "[" << str_util::Join(test.uncalibrated, ",") << "]"; - TF_ASSERT_OK(RunOpKernel()); - - if (use_sparse) { - // Sparse implementation. - Tensor expected_weights( - allocator(), DT_DOUBLE, - TensorShape({static_cast(test.expected_weights.size())})); - test::FillValues(&expected_weights, test.expected_weights); - test::ExpectTensorEqual(expected_weights, *GetOutput(1)); - - Tensor expected_indices( - allocator(), DT_INT64, - TensorShape({static_cast(test.expected_indices.size()), 2})); - std::vector flattened_indices_with_batch; - for (int64 index : test.expected_indices) { - flattened_indices_with_batch.push_back(0); // batch index, always 0 - flattened_indices_with_batch.push_back(index); - } - test::FillValues(&expected_indices, - flattened_indices_with_batch); - LOG(INFO) << "Expected: " - << "[" << str_util::Join(test.expected_indices, ",") << "]"; - - test::ExpectTensorEqual(expected_indices, *GetOutput(0)); - - } else { - // Dense implementation. - Tensor expected_weights( - allocator(), DT_DOUBLE, - TensorShape({1, static_cast(num_keypoints)})); - std::vector weights(num_keypoints, 0); - for (int i = 0; i < test.expected_weights.size(); i++) { - weights[test.expected_indices[i]] = test.expected_weights[i]; - } - test::FillValues(&expected_weights, weights); - test::ExpectTensorEqual(expected_weights, *GetOutput(0)); - } - } - - // Test batch version - inputs_.clear(); - TF_ASSERT_OK(NodeDefBuilder("pwl_indexing_calibrator:1", - use_sparse ? "PwlIndexingCalibratorSparse" - : "PwlIndexingCalibrator") - .Input(FakeInput(DT_DOUBLE)) // - .Input(FakeInput(DT_DOUBLE)) - .Finalize(node_def())); - TF_ASSERT_OK(InitOp()); - - std::vector all_uncalibrated; - for (const auto &test : tests) { - all_uncalibrated.push_back(test.uncalibrated[0]); - } - AddInputFromArray( - TensorShape({static_cast(all_uncalibrated.size())}), - all_uncalibrated); - - AddInputFromArray(TensorShape({6}), keypoints_inputs); - - LOG(INFO) << "Testing for batch of all uncalibrated values: uncalibrated=" - << "[" << str_util::Join(all_uncalibrated, ",") << "]"; - TF_ASSERT_OK(RunOpKernel()); - - if (use_sparse) { - // Sparse implementation. - std::vector vec_indices; - std::vector vec_weights; - for (int j = 0; j < tests.size(); j++) { - const Test &test = tests[j]; - for (int64 idx : test.expected_indices) { - // Each example takes two coordinates. - vec_indices.push_back(j); - vec_indices.push_back(idx); - } - for (double w : test.expected_weights) { - vec_weights.push_back(w); - } - } - - Tensor expected_weights( - allocator(), DT_DOUBLE, - TensorShape({static_cast(vec_weights.size())})); - test::FillValues(&expected_weights, vec_weights); - - Tensor expected_indices( - allocator(), DT_INT64, - TensorShape({static_cast(vec_weights.size()), 2})); - test::FillValues(&expected_indices, vec_indices); - - const Tensor &output_indices = *GetOutput(0); - const Tensor &output_weights = *GetOutput(1); - test::ExpectTensorEqual(expected_indices, output_indices); - test::ExpectTensorEqual(expected_weights, output_weights); - - } else { - // Batch dense version. 
- Tensor expected_weights(allocator(), DT_DOUBLE, - TensorShape({static_cast(tests.size()), - static_cast(num_keypoints)})); - std::vector weights(tests.size() * num_keypoints, 0); - for (int j = 0; j < tests.size(); j++) { - const Test &test = tests[j]; - for (int i = 0; i < test.expected_weights.size(); i++) { - weights[j * num_keypoints + test.expected_indices[i]] = - test.expected_weights[i]; - } - } - test::FillValues(&expected_weights, weights); - test::ExpectTensorEqual(expected_weights, *GetOutput(0)); - } - } - - void PwlIndexingCalibratorFloatHelper(const bool use_sparse) { - ArraySlice keypoints_inputs{0.0, 20.0, 40.0, 60.0, 80.0, 100.0}; - const int num_keypoints = keypoints_inputs.size(); - - TF_ASSERT_OK(NodeDefBuilder("pwl_indexing_calibrator:0", - use_sparse ? "PwlIndexingCalibratorSparse" - : "PwlIndexingCalibrator") - .Input(FakeInput(DT_FLOAT)) // - .Input(FakeInput(DT_FLOAT)) - .Finalize(node_def())); - TF_ASSERT_OK(InitOp()); - constexpr float uncalibrated = 200.0; - AddInputFromArray(TensorShape({1}), {uncalibrated}); - AddInputFromArray(TensorShape({6}), keypoints_inputs); - TF_ASSERT_OK(RunOpKernel()) << "Failed for uncalibrated=" - << "[" << uncalibrated << "]"; - LOG(INFO) << "Testing for uncalibrated=" - << "[" << uncalibrated << "]"; - - if (use_sparse) { - // Sparse implementation. - Tensor expected_weights(allocator(), DT_FLOAT, TensorShape({2})); - test::FillValues(&expected_weights, {0.0, 1.0}); - test::ExpectTensorEqual(expected_weights, *GetOutput(1)); - - Tensor expected_indices(allocator(), DT_INT64, TensorShape({2, 2})); - test::FillValues(&expected_indices, {0, 4, 0, 5}); - test::ExpectTensorEqual(expected_indices, *GetOutput(0)); - - } else { - // Dense implementation. - Tensor expected_weights( - allocator(), DT_FLOAT, - TensorShape({1, static_cast(num_keypoints)})); - std::vector values(num_keypoints, 0); - values[5] = 1; - test::FillValues(&expected_weights, values); - test::ExpectTensorEqual(expected_weights, *GetOutput(0)); - } - } - - void PwlIndexingCalibratorGradientHelper(const bool use_sparse) { - ArraySlice keypoints_inputs{0.0, 20.0, 40.0, 60.0, 80.0, 100.0}; - ArraySlice weights_grad{0.0, 1.0, 2.0, 4.0, 8.0, 10.0}; - ArraySlice grad_wrt_kp_inputs_values{0, 0, 0, 0, 0, 0}; - - struct Test { - ArraySlice uncalibrated; - ArraySlice interpolation_weights; - - // interpolation_indices: 2 numbers per value: batch_index, weight_index. - ArraySlice interpolation_indices; - - ArraySlice grad_wrt_input; - - // Indices that would been used by the sparse interpolation, see - // FindExpandedInterpolationIndices. - std::vector keypoint_indices; - }; - std::vector tests{ - // At min, gradient should be based on slope of the first piece. - {{-10.0}, {1.0, 0.0}, {0, 1}, {1.0 / 20.0}}, - - // At max, gradient should be based on slope of the last piece. - {{200.0}, {0.0, 1.0}, {4, 5}, {2.0 / 20.0}}, - - // At a keypoint, slope should be mean of two slopes: - {{40.0}, {0.0, 1.0, 0.0}, {1, 2, 3}, {(1.0 / 20 + 2.0 / 20) / 2}}, - - // Interpolated examples. 
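-        // With weights_grad = {0, 1, 2, 4, 8, 10}, grad_wrt_input is the dot
-        // product of weights_grad with dW/dx: for 10.0, dW0/dx = -1/20 and
-        // dW1/dx = 1/20, so the gradient is 0 * (-1/20) + 1 * (1/20) = 1/20.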
- {{10.0}, {0.5, 0.5}, {0, 1}, {1.0 / 20}}, - {{75.0}, {0.25, 0.75}, {3, 4}, {4.0 / 20}}, - }; - - LOG(INFO) << "Keypoints inputs: " - << "[" << str_util::Join(keypoints_inputs, ",") << "]"; - - for (const auto &test : tests) { - inputs_.clear(); - if (use_sparse) { - TF_ASSERT_OK(NodeDefBuilder("pwl_indexing_calibrator_gradient:0", - "PwlIndexingCalibratorSparseGradient") - .Input(FakeInput(DT_DOUBLE)) - .Input(FakeInput(DT_DOUBLE)) - .Input(FakeInput(DT_INT64)) - .Input(FakeInput(DT_DOUBLE)) - .Finalize(node_def())); - } else { - TF_ASSERT_OK(NodeDefBuilder("pwl_indexing_calibrator_gradient:0", - "PwlIndexingCalibratorGradient") - .Input(FakeInput(DT_DOUBLE)) - .Input(FakeInput(DT_DOUBLE)) - .Input(FakeInput(DT_DOUBLE)) - .Finalize(node_def())); - } - TF_ASSERT_OK(InitOp()); - - // Input being calibrated. - AddInputFromArray(TensorShape({1}), test.uncalibrated); - - // Parameters of calibration: the keypoints input values. - AddInputFromArray(TensorShape({6}), keypoints_inputs); - - // The gradient with respect to the output: presumably the keypoints - // outputs if they are the last layer. - if (use_sparse) { - // Add interpolation indices, that will be provided for sparse - // gradients. - std::vector flattened_interpolation_indices_with_batch; - for (const int64 weight_index : test.interpolation_indices) { - flattened_interpolation_indices_with_batch.push_back(0); // batch_idx - flattened_interpolation_indices_with_batch.push_back(weight_index); - } - AddInputFromArray( - TensorShape( - {static_cast(test.interpolation_indices.size()), 2}), - flattened_interpolation_indices_with_batch); - - std::vector sparse_weights_grad; - for (const int64 weight_index : test.interpolation_indices) { - sparse_weights_grad.push_back(weights_grad[weight_index]); - } - AddInputFromArray( - TensorShape({static_cast(sparse_weights_grad.size())}), - sparse_weights_grad); - } else { - AddInputFromArray(TensorShape({1, 6}), weights_grad); - } - LOG(INFO) << "Testing for uncalibrated=" - << "[" << str_util::Join(test.uncalibrated, ",") << "]"; - TF_ASSERT_OK(RunOpKernel()); - - Tensor grad_wrt_input(allocator(), DT_DOUBLE, TensorShape({1})); - test::FillValues(&grad_wrt_input, test.grad_wrt_input); - test::ExpectTensorEqual(grad_wrt_input, *GetOutput(0)); - - Tensor grad_wrt_kp_inputs( - allocator(), DT_DOUBLE, - TensorShape({static_cast(grad_wrt_kp_inputs_values.size())})); - test::FillValues(&grad_wrt_kp_inputs, grad_wrt_kp_inputs_values); - test::ExpectTensorEqual(grad_wrt_kp_inputs, *GetOutput(1)); - } - - // Evaluate all tests in one batch. - inputs_.clear(); - if (use_sparse) { - TF_ASSERT_OK(NodeDefBuilder("pwl_indexing_calibrator_gradient:0", - "PwlIndexingCalibratorSparseGradient") - .Input(FakeInput(DT_DOUBLE)) - .Input(FakeInput(DT_DOUBLE)) - .Input(FakeInput(DT_INT64)) - .Input(FakeInput(DT_DOUBLE)) - .Finalize(node_def())); - } else { - TF_ASSERT_OK(NodeDefBuilder("pwl_indexing_calibrator_gradient:0", - "PwlIndexingCalibratorGradient") - .Input(FakeInput(DT_DOUBLE)) - .Input(FakeInput(DT_DOUBLE)) - .Input(FakeInput(DT_DOUBLE)) - .Finalize(node_def())); - } - TF_ASSERT_OK(InitOp()); - - // Input being calibrated. - std::vector all_uncalibrated; - for (const auto &test : tests) { - all_uncalibrated.push_back(test.uncalibrated[0]); - } - AddInputFromArray(TensorShape({static_cast(tests.size())}), - all_uncalibrated); - - // Parameters of calibration: the keypoints' input values. 
- AddInputFromArray(TensorShape({6}), keypoints_inputs); - - // The gradient with respect to the output: presumably the keypoints' - // outputs if they are the last layer. - if (use_sparse) { - std::vector grad_wrt_weights_sparse; - std::vector interpolation_indices_with_batch; - for (int batch_index = 0; batch_index < tests.size(); batch_index++) { - const auto &test = tests[batch_index]; - for (const int weight_index : test.interpolation_indices) { - grad_wrt_weights_sparse.push_back(weights_grad[weight_index]); - interpolation_indices_with_batch.push_back(batch_index); - interpolation_indices_with_batch.push_back(weight_index); - } - } - AddInputFromArray( - TensorShape({static_cast(grad_wrt_weights_sparse.size()), 2}), - interpolation_indices_with_batch); - AddInputFromArray( - TensorShape({static_cast(grad_wrt_weights_sparse.size())}), - grad_wrt_weights_sparse); - } else { - // Repeat weights_grad for each test. - std::vector repeated_weights_grad; - for (int i = 0; i < tests.size(); i++) { - for (const double w : weights_grad) { - repeated_weights_grad.push_back(w); - } - } - AddInputFromArray( - TensorShape({static_cast(tests.size()), 6}), - repeated_weights_grad); - } - LOG(INFO) << "Testing for all tests in one batch"; - TF_ASSERT_OK(RunOpKernel()); - - Tensor grad_wrt_input(allocator(), DT_DOUBLE, - TensorShape({static_cast(tests.size())})); - std::vector all_grad_wrt_input; - for (const auto &test : tests) { - all_grad_wrt_input.push_back(test.grad_wrt_input[0]); - } - test::FillValues(&grad_wrt_input, all_grad_wrt_input); - test::ExpectTensorEqual(grad_wrt_input, *GetOutput(0)); - - Tensor grad_wrt_kp_inputs( - allocator(), DT_DOUBLE, - TensorShape({static_cast(grad_wrt_kp_inputs_values.size())})); - test::FillValues(&grad_wrt_kp_inputs, grad_wrt_kp_inputs_values); - test::ExpectTensorEqual(grad_wrt_kp_inputs, *GetOutput(1)); - } -}; - -TEST_F(PwlIndexingCalibratorOpTest, PwlIndexingCalibratorDense) { - LOG(INFO) << "Process whole batch at once: (split_batch=false)"; - PwlSetTestMode(/*split_batch=*/false); - PwlIndexingCalibratorHelper(false); - - LOG(INFO) << "Process whole batch in splits: (split_batch=true)"; - PwlSetTestMode(/*split_batch=*/true); - PwlIndexingCalibratorHelper(false); - PwlSetTestMode(/*split_batch=*/false); -} - -TEST_F(PwlIndexingCalibratorOpTest, PwlIndexingCalibratorSparse) { - PwlIndexingCalibratorHelper(true); -} - -TEST_F(PwlIndexingCalibratorOpTest, PwlIndexingCalibratorFloatDense) { - PwlIndexingCalibratorFloatHelper(false); -} - -TEST_F(PwlIndexingCalibratorOpTest, PwlIndexingCalibratorFloatSparse) { - PwlIndexingCalibratorFloatHelper(true); -} - -TEST_F(PwlIndexingCalibratorOpTest, PwlIndexingCalibratorGradientDense) { - PwlIndexingCalibratorGradientHelper(false); -} - -TEST_F(PwlIndexingCalibratorOpTest, PwlIndexingCalibratorGradientSparse) { - PwlIndexingCalibratorGradientHelper(true); -} - -TEST_F(PwlIndexingCalibratorOpTest, PwlIndexingCalibrator_ShapeFn) { - ShapeInferenceTestOp op("PwlIndexingCalibrator"); - TF_ASSERT_OK(NodeDefBuilder("test", "PwlIndexingCalibrator") - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Finalize(&op.node_def)); - - INFER_OK(op, "[20];[10]", "[d0_0,d1_0]"); - INFER_OK(op, "[?];[10]", "[d0_0,d1_0]"); - - INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[20,1];[10]"); -} - -TEST_F(PwlIndexingCalibratorOpTest, PwlIndexingCalibratorGradient_ShapeFn) { - ShapeInferenceTestOp op("PwlIndexingCalibratorGradient"); - TF_ASSERT_OK(NodeDefBuilder("pwl_indexing_calibrator_gradient:1", - 
"PwlIndexingCalibratorGradient") - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Finalize(&op.node_def)); - - INFER_OK(op, "[11];[13];[11,13]", "[d0_0];[d1_0]"); - INFER_OK(op, "[?];[7];[?,7]", "[d0_0];[d1_0]"); - - INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[20,1];[11];[20,11]"); - INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[20];[11,1];[20,11]"); - INFER_ERROR("Shape must be rank 2 but is rank 1", op, "[20];[11];[20]"); - INFER_ERROR( - "grad_wrt_weights has shape [17,11], but weights has shape [20,11]", op, - "[20];[11];[17,11]"); -} - -} // namespace lattice -} // namespace tensorflow diff --git a/tensorflow_lattice/cc/ops/simplex_interpolation_ops_test.cc b/tensorflow_lattice/cc/ops/simplex_interpolation_ops_test.cc deleted file mode 100644 index 92a8ae8..0000000 --- a/tensorflow_lattice/cc/ops/simplex_interpolation_ops_test.cc +++ /dev/null @@ -1,143 +0,0 @@ -/* Copyright 2017 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include -#include - -#include "tensorflow/core/framework/fake_input.h" -#include "tensorflow/core/framework/node_def_builder.h" -#include "tensorflow/core/framework/shape_inference_testutil.h" -#include "tensorflow/core/framework/tensor.h" -#include "tensorflow/core/framework/tensor_testutil.h" -#include "tensorflow/core/kernels/ops_testutil.h" -#include "tensorflow/core/lib/core/status_test_util.h" -#include "tensorflow/core/platform/logging.h" -#include "tensorflow/core/platform/test.h" - -namespace tensorflow { -namespace lattice { - -class SimplexInterpolationOpsTest : public OpsTestBase { - protected: - SimplexInterpolationOpsTest() {} -}; - -TEST_F(SimplexInterpolationOpsTest, ThreeDoubleLattice) { - const std::vector lattice_sizes = {3}; - TF_ASSERT_OK(NodeDefBuilder("simplex_interpolation", "SimplexInterpolation") - .Input(FakeInput(DT_DOUBLE)) - .Attr("lattice_sizes", lattice_sizes) - .Finalize(node_def())); - TF_ASSERT_OK(InitOp()); - // Input tensor = [[-1], [0], [0.2], [0.8], [1.0], [1.3], [2.0], [2.5]]. - AddInputFromArray(TensorShape({8, 1}), - {-1.0, 0.0, 0.2, 0.8, 1.0, 1.3, 2.0, 2.5}); - TF_ASSERT_OK(RunOpKernel()); - // expected weight = [[1, 0, 0], [1, 0, 0], [0.8, 0.2, 0], [0.2, 0.8, 0], - // [0, 1, 0], [0, 0.7, 0.3], [0, 0, 1.0], [0, 0, 1.0]]. 
- Tensor expected_weights(DT_DOUBLE, TensorShape({8, 3})); - test::FillValues( - &expected_weights, - {1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.8, 0.2, 0.0, 0.2, 0.8, 0.0, - 0.0, 1.0, 0.0, 0.0, 0.7, 0.3, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0}); - - LOG(INFO) << "Input: " << GetInput(0).SummarizeValue(8); - LOG(INFO) << "Expected weight: " << expected_weights.SummarizeValue(24); - LOG(INFO) << "Result: " << GetOutput(0)->SummarizeValue(24); - test::ExpectTensorEqual(expected_weights, *GetOutput(0)); -} - -TEST_F(SimplexInterpolationOpsTest, TwoByTwoFloatLattice) { - const std::vector lattice_sizes = {2, 2}; - TF_ASSERT_OK(NodeDefBuilder("simplex_interpolation", "SimplexInterpolation") - .Input(FakeInput(DT_FLOAT)) - .Attr("lattice_sizes", lattice_sizes) - .Finalize(node_def())); - TF_ASSERT_OK(InitOp()); - // Input tensor = [[0, 0], [0, 1], [1, 0], [1, 1], [0.5, 0.5], [0.2, 0.8], - // [0.2, 0.3]] - AddInputFromArray( - TensorShape({7, 2}), - {0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.5, 0.5, 0.2, 0.8, 0.2, 0.3}); - TF_ASSERT_OK(RunOpKernel()); - // expected weight = [[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1], - // [0.5, 0, 0, 0.5], [0.2, 0, 0.6, 0.2], [0.7, 0, 0.1, 0.2] - Tensor expected_weights(DT_FLOAT, TensorShape({7, 4})); - test::FillValues( - &expected_weights, - {1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 1.0, 0.5, 0.0, 0.0, 0.5, 0.2, 0.0, 0.6, 0.2, 0.7, 0.0, 0.1, 0.2}); - - LOG(INFO) << "Input: " << GetInput(0).SummarizeValue(14); - LOG(INFO) << "Expected weight: " << expected_weights.SummarizeValue(28); - LOG(INFO) << "Result: " << GetOutput(0)->SummarizeValue(28); - test::ExpectTensorEqual(expected_weights, *GetOutput(0)); -} - -TEST(SimplexInterpolationOpsShapeTest, SimplexInterpolation_ShapeFn) { - ShapeInferenceTestOp op("SimplexInterpolation"); - - // Total number of weights = 3 x 2 x 3 = 18. - // Output dimension is always 18. - std::vector lattice_sizes = {3, 2, 3}; - TF_ASSERT_OK(NodeDefBuilder("test", "SimplexInterpolation") - .Input(FakeInput(DT_FLOAT)) - .Attr("lattice_sizes", lattice_sizes) - .Finalize(&op.node_def)); - - INFER_OK(op, "[10,3]", "[d0_0,18]"); - INFER_OK(op, "[?,3]", "[d0_0,18]"); - - INFER_ERROR("", op, "[?,?]"); - INFER_ERROR("Shape must be rank 2 but is rank 3", op, "[?,?,1]"); - INFER_ERROR("Shape must be rank 2 but is rank 1", op, "[10]"); - INFER_ERROR("Dimension must be 3 but is 2", op, "[?,2]"); - INFER_ERROR("Dimension must be 3 but is 2", op, "[5,2]"); -} - -TEST(SimplexGradientOpsShapeTest, SimplexGradient_ShapeFn) { - ShapeInferenceTestOp op("SimplexGradient"); - - // Total number of weights = 3 x 2 x 3 = 18. - // Output dimension is always 18. 
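-  // The single output, grad_wrt_input, echoes the input shape
-  // [batch_size, input_dim].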
- std::vector lattice_sizes = {3, 2, 3}; - TF_ASSERT_OK(NodeDefBuilder("test", "SimplexGradient") - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Input(FakeInput(DT_FLOAT)) - .Attr("lattice_sizes", lattice_sizes) - .Finalize(&op.node_def)); - - INFER_OK(op, "[10,3];[10,18];[10,18]", "[d0_0,d0_1]"); - INFER_OK(op, "[?,3];[?,18];[?,18]", "[d0_0,d0_1]"); - - INFER_ERROR("", op, "[?,?]"); - INFER_ERROR("Shape must be rank 2 but is rank 3", op, "[?,?,1];[?,1];[?,1]"); - INFER_ERROR("Shape must be rank 2 but is rank 1", op, "[10];[?,?,1];[?,?,1]"); - INFER_ERROR("Dimension must be 3 but is 2", op, "[?,2];[2,3];[2,3]"); - INFER_ERROR("Dimension must be 3 but is 2", op, "[5,2];[5,5];[5,5]"); - INFER_ERROR("Shape must be rank 2 but is rank 3", op, "[2,3];[?,1,3];[?,1]"); - INFER_ERROR("Shape must be rank 2 but is rank 1", op, "[2,3];[10];[10]"); - INFER_ERROR("Input batch size (2) != Weight batch size (5)", op, - "[2,3];[5,18];[5,18]"); - INFER_ERROR("Weight shape ([2,18]) != GradWrtWeight shape ([5,18])", op, - "[2,3];[2,18];[5,18]"); - INFER_ERROR("Weight shape ([2,18]) != GradWrtWeight shape ([2,15])", op, - "[2,3];[2,18];[2,15]"); - INFER_ERROR("Dimension must be 18 but is 17", op, "[?,3];[?,17];[?,17]"); - INFER_ERROR("Dimension must be 18 but is 5", op, "[5,3];[5,5];[5,5]"); -} - -} // namespace lattice -} // namespace tensorflow diff --git a/tensorflow_lattice/cc/test_tools/test_main.cc b/tensorflow_lattice/cc/test_tools/test_main.cc deleted file mode 100644 index 6fcb6fe..0000000 --- a/tensorflow_lattice/cc/test_tools/test_main.cc +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright 2017 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -// A program with a main that is suitable for unittests. -#include "tensorflow/core/platform/test.h" -#include "tensorflow/core/platform/test_benchmark.h" - -int main(int argc, char** argv) { - testing::InitGoogleTest(&argc, argv); - tensorflow::testing::RunBenchmarks(); - return RUN_ALL_TESTS(); -} diff --git a/tensorflow_lattice/cc/tflite_ops/BUILD b/tensorflow_lattice/cc/tflite_ops/BUILD deleted file mode 100644 index 12bbac8..0000000 --- a/tensorflow_lattice/cc/tflite_ops/BUILD +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2018 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -# This package makes the custom ops developed for tensorflow_lattice available -# in the form of tf-lite ops. For help with integration, reach out to -# tensorflow_lattice team -load( - "@org_tensorflow//tensorflow:tensorflow.bzl", - "tf_cc_test", -) - -package(default_visibility = [ - "//visibility:public", -]) - -licenses(["notice"]) # Apache 2.0 License - -exports_files(["LICENSE"]) - -cc_library( - name = "tflite_ops", - srcs = [ - "helpers.h", - "interpolation.cc", - "pwl_indexing_calibrator.cc", - ], - hdrs = [ - "tflite_ops.h", - ], - deps = [ - "@org_tensorflow//tensorflow/lite:framework", - "@org_tensorflow//tensorflow/lite/kernels:kernel_util", - "@org_tensorflow//tensorflow/lite/kernels/internal:reference", - "@flatbuffers", - ], -) - -cc_library( - name = "tflite_ops_cc", - srcs = ["tflite_ops.cc"], - hdrs = ["tflite_ops.h"], - deps = ["@org_tensorflow//tensorflow/lite:framework"], -) - -tf_cc_test( - name = "hypercube_interpolation_test", - size = "small", - srcs = [ - "hypercube_interpolation_test.cc", - ], - deps = [ - ":tflite_ops", - "@org_tensorflow//tensorflow/lite:framework", - "@org_tensorflow//tensorflow/lite:string_util", - "@org_tensorflow//tensorflow/lite/kernels:builtin_ops", - "@org_tensorflow//tensorflow/lite/kernels:kernel_util", - "@org_tensorflow//tensorflow/lite/kernels:test_util", - "@org_tensorflow//tensorflow/lite/kernels/internal:reference", - "@com_google_googletest//:gtest", - "@flatbuffers", - ], -) - -tf_cc_test( - name = "simplex_interpolation_test", - size = "small", - srcs = [ - "simplex_interpolation_test.cc", - ], - deps = [ - ":tflite_ops", - "@org_tensorflow//tensorflow/lite:framework", - "@org_tensorflow//tensorflow/lite:string_util", - "@org_tensorflow//tensorflow/lite/kernels:builtin_ops", - "@org_tensorflow//tensorflow/lite/kernels:kernel_util", - "@org_tensorflow//tensorflow/lite/kernels:test_util", - "@org_tensorflow//tensorflow/lite/kernels/internal:reference", - "@com_google_googletest//:gtest", - "@flatbuffers", - ], -) - -tf_cc_test( - name = "pwl_indexing_calibrator_test", - size = "small", - srcs = [ - "pwl_indexing_calibrator_test.cc", - ], - deps = [ - ":tflite_ops", - "@org_tensorflow//tensorflow/lite:framework", - "@org_tensorflow//tensorflow/lite:string_util", - "@org_tensorflow//tensorflow/lite/kernels:builtin_ops", - "@org_tensorflow//tensorflow/lite/kernels:kernel_util", - "@org_tensorflow//tensorflow/lite/kernels:test_util", - "@org_tensorflow//tensorflow/lite/kernels/internal:reference", - "@com_google_googletest//:gtest", - "@flatbuffers", - ], -) - -tf_cc_test( - name = "pwl_indexing_calibrator_sparse_test", - size = "small", - srcs = [ - "pwl_indexing_calibrator_sparse_test.cc", - ], - deps = [ - ":tflite_ops", - "@org_tensorflow//tensorflow/lite:framework", - "@org_tensorflow//tensorflow/lite:string_util", - "@org_tensorflow//tensorflow/lite/kernels:builtin_ops", - "@org_tensorflow//tensorflow/lite/kernels:kernel_util", - "@org_tensorflow//tensorflow/lite/kernels:test_util", - "@org_tensorflow//tensorflow/lite/kernels/internal:reference", - "@com_google_googletest//:gtest", - "@flatbuffers", - ], -) - -py_binary( - name = "toco_wrapper", - srcs = ["toco_wrapper.py"], - python_version = "PY2", - deps = [ - ":tflite_ops", - "//tensorflow_lattice", - "@org_tensorflow//tensorflow/lite/python:tflite_convert_main_lib", - ], -) - -py_binary( - name = "freeze_graph_wrapper", - srcs = ["freeze_graph_wrapper.py"], - python_version = "PY2", - 
srcs_version = "PY2AND3", - deps = [ - "//tensorflow_lattice", - "@org_tensorflow//tensorflow/python/tools:freeze_graph_main_lib", - ], -) diff --git a/tensorflow_lattice/cc/tflite_ops/README.md b/tensorflow_lattice/cc/tflite_ops/README.md deleted file mode 100644 index 4bcb005..0000000 --- a/tensorflow_lattice/cc/tflite_ops/README.md +++ /dev/null @@ -1,202 +0,0 @@ - - -# tensorflow_lattice on TF-Lite - -## Concepts - -__TF-Lite__ Framework in tensorflow/contrib for evaluating TF graphs on -low-power platforms - -__TOCO__ Tool for converting saved tensorflow graphs to tf-lite format -[TOCO docs](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/toco/g3doc/cmdline_examples.md) - -## Introduction - -This document describes how to use a lattice model on a low-power platform by -converting it to a Tensorflow-lite model which can then be run on the device. -This allows for inferences to be done without wifi or server costs. - -## Use Notes - -These tf-lite ops are necessary when running a tf-lite model that includes any -custom tf-lattice ops. Typically, a TF model saved in frozen_graph format will -be converted with TOCO. The output of TOCO can then be run (on device) with -TF-Lite. There are two integration tasks corresponding to these two steps: - -### Use TOCO to convert a saved Tensorflow graph - -TOCO, as explained above, operates on the output of the `tflite_convert` -utility. In order for this utility to work properly, Tensorflow itself must have -already loaded any custom ops that are needed. This is done 'lazily', so that -Tensorflow, and consequently TOCO, will fail to find a custom op that has not -yet been loaded. This is the purpose of the `toco_wrapper` target in this -directory. It triggers the loading of tensorflow_lattice ops by importing the -`tensorflow_lattice` python package. The wrapper script simply makes this import -and then calls `tflite_convert`. Use `toco_wrapper` with the same arguments that -you wish passed on to `tflite_convert`. - -### Make the tf-lite op visible to the tf-lite interpreter - -The low level code that instantiates and calls the tf-lite interpreter must be -modified to register the custom op. __The registration is done _in situ_ by the -team who wish to use the ops.__ Remember to add the `'tflite_ops` dependency to -build target. - -## Example Commands - -Example code for registering op: - -```c++ -#include "third_party/py/tensorflow_lattice/cc/tflite_ops/tflite_ops.h" - -namespace tflite { - -// ... - -tflite::ops::builtin::BuiltinOpResolver resolver; -// this is the key addition -RegisterTfLatticeOps(&resolver); -``` - -Example commands, useful for testing that an op is reachable: - -``` -$ toco_wrapper \ - --output_file=/tmp/xo.tflite \ - --graph_def_file=/usr/local/google/home/epenn/Downloads/frozen_graph.pb \ - --input_arrays=deploy/Placeholder \ - --output_arrays=deploy/regression/MatMul \ - --allow_custom_ops - -# This command will fail unless an edit like that described above is made to -# .../lite/tools/benchmark/benchmark_tflite_model.cc -$ bazel run tensorflow/lite/tools/benchmark:benchmark_model \ - -- --graph=/tmp/xo.tflite - -``` - -If successful, the last command will print a summary of run timings. - -## Full Example - -### Build model - -Consider the following simple tf_lattice model. Note where the model directory -is being set, this information will be important later. - -```python -import numpy as np - -import tensorflow as tf -import tensorflow_lattice as tfl - -# Feature definition. 
-feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), -] - -# Hyperparameters. -num_keypoints = 10 -hparams = tfl.CalibratedRtlHParams( - num_keypoints=num_keypoints, - num_lattices=5, - lattice_rank=2, - learning_rate=0.1) -def init_fn(): - return tfl.uniform_keypoints_for_signal(num_keypoints, - input_min=-1.0, - input_max=1.0, - output_min=0.0, - output_max=1.0) - -# Estimator. -rtl_estimator = tfl.calibrated_rtl_regressor( - model_dir='/tmp/tfl_estimator_0', # Set model directory - feature_columns=feature_columns, - hparams=hparams, - keypoints_initializers_fn=init_fn -) - -# Prepare the dataset. -num_examples = 1000 -x0 = np.random.uniform(-1.0, 1.0, size=num_examples) -x1 = np.random.uniform(-1.0, 1.0, size=num_examples) -y = x0 ** 2 + x1 ** 2 - -# Example input function. -twod_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x={'x0': x0, - 'x1': x1}, - y=y, - batch_size=10, - num_epochs=1, - shuffle=False) - -# Train! -rtl_estimator.train(input_fn=twod_input_fn) -# Evaluate! -print(rtl_estimator.evaluate(input_fn=twod_input_fn)) -``` - -### Determine input and output nodes - -In order to use the conversion utilities below, it is necessary to know which -nodes in the tensorflow model graph are to be used as input and output. This can -be tricky, especially when using the estimator API. - -To visually inspect the graph, run the following: - -```bash -$ MODEL_DIR=/tmp/tfl_estimator_0 # from above -$ tensorboard --logdir $MODEL_DIR # use the model directory specified above -``` - -For this example, the following nodes will be used for input and output: - -```bash -$ INPUT_NODE=tfl_calibrated_rtl/feature_column_transformation/input_layer/concat -$ OUTPUT_NODE=tfl_calibrated_rtl/add -``` - -### Convert trained model to frozen graph format using frozen_graph_wrapper - -This conversion uses the tensorflow `frozen_graph` utility. As with -`tflite_convert` (TOCO), this utility requires that tensorflow has loaded the -tensorflow_lattice custom ops. In order to facilitate this, a simple wrapper is -provided. - -```bash -$ freeze_graph_wrapper \ - --input_graph=$MODEL_DIR/graph.pbtxt \ - --input_checkpoint=$MODEL_DIR/model.ckpt-100 \ - --output_graph=$MODEL_DIR/output_graph.pb \ - --output_node_names=tfl_calibrated_rtl/add -``` - -### Convert frozen graph to tf-lite format using toco_wrapper - -This step will produce a tf-lite artifact suitable for use. Note that use will -require edits to the low level C++ code as described above - -```bash -$ toco_wrapper \ - --output_file=$MODEL_DIR/tflite.out \ - --graph_def_file=$MODEL_DIR/output_graph.pb \ - --input_arrays=$INPUT_NODE \ - --output_arrays=$OUTPUT_NODE \ - --allow_custom_ops -``` diff --git a/tensorflow_lattice/cc/tflite_ops/freeze_graph_wrapper.py b/tensorflow_lattice/cc/tflite_ops/freeze_graph_wrapper.py deleted file mode 100644 index e5c2aa2..0000000 --- a/tensorflow_lattice/cc/tflite_ops/freeze_graph_wrapper.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2018 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Runs TOCO tflite_converter after importing tensorflow_lattice ops.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -# Dependency imports -# tensorflow_lattice must be imported in order for tensorflow to recognize its -# custom ops, which is necessary for freeze_graph to find them -import tensorflow_lattice as tfl # pylint: disable=unused-import -from tensorflow.python.tools import freeze_graph - - -def main(): - return freeze_graph.run_main() - - -if __name__ == '__main__': - main() diff --git a/tensorflow_lattice/cc/tflite_ops/helpers.h b/tensorflow_lattice/cc/tflite_ops/helpers.h deleted file mode 100644 index fc9f07e..0000000 --- a/tensorflow_lattice/cc/tflite_ops/helpers.h +++ /dev/null @@ -1,94 +0,0 @@ -/* Copyright 2018 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LATTICE_CC_TFLITE_OPS_HELPERS_H_ -#define TENSORFLOW_LATTICE_CC_TFLITE_OPS_HELPERS_H_ - -#include -#include - -namespace tflite_lattice { - -template -T ClipToBounds(const T value, const T lower_bound, const T upper_bound) { - return value > upper_bound ? upper_bound - : (value < lower_bound ? lower_bound : value); -} - -// BottomCornerIndexAndResidual contains a bottom corner index in the multi-cell -// lattice and residual vector for a given input. If out_of_bound[k] is true, -// then kth input is outside of multi-cell lattice's boundary. -template -struct BottomCornerIndexAndResidual { - int bottom_corner_index; - std::vector residual; - std::vector out_of_bound; -}; - -template -BottomCornerIndexAndResidual GetBottomCornerIndexAndResidual( - std::vector lattice_sizes, const float* input_row, - std::vector strides) { - int dimension = lattice_sizes.size(); - BottomCornerIndexAndResidual bottom_corner_index_and_residual; - int& bottom_corner_index = - bottom_corner_index_and_residual.bottom_corner_index; - std::vector& residual = bottom_corner_index_and_residual.residual; - std::vector& out_of_bound = - bottom_corner_index_and_residual.out_of_bound; - - residual.resize(dimension); - out_of_bound.resize(dimension); - - bottom_corner_index = 0; - for (int i = 0; i < dimension; ++i) { - const int max_vertex_in_i = lattice_sizes[i] - 1; - const float input_i = input_row[i]; - // Find the bottom corner lattice coordinates for the "i"th feature of - // this point. - // We clip to the bounds of the lattice, [0, max_vertex_in_i]. 
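-    // Clipping the bottom corner to max_vertex_in_i - 1 keeps the cell's top
-    // corner (bottom + 1) inside the lattice; the residual is then clipped
-    // to [0, 1].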
-
-    const int bottom_corner_i = ClipToBounds(
-        static_cast<int>(floor(input_i)), 0, max_vertex_in_i - 1);
-    const Dtype residual_i =
-        ClipToBounds<Dtype>(input_i - bottom_corner_i, 0.0, 1.0);
-
-    bottom_corner_index += strides[i] * bottom_corner_i;
-    residual[i] = residual_i;
-    out_of_bound[i] = (input_i < 0.0 || input_i > max_vertex_in_i);
-  }
-  return bottom_corner_index_and_residual;
-}
-
-typedef struct {
-  // lattice_sizes is provided by the user and records the number of nodes in
-  // each lattice dimension. All other members are derived from it.
-  // Naming matches the tensorflow op.
-  std::vector<int> lattice_sizes;
-  // Number of dimensions present. Total nodes = k ^ dimension if all
-  // lattice_sizes are k.
-  int dimension;
-  // Advancing one node in dimension k moves strides[k] positions in the
-  // flattened index.
-  std::vector<int> strides;
-  // Total number of nodes; the product of the entries of lattice_sizes.
-  int num_vertices;
-  // Nodes in a cell. Note that this is the number of non-zero weights that
-  // will result from interpolating one point. Also note that each vertex can
-  // belong to many cells.
-  int num_vertices_per_cell;
-} InterpolationParams;
-
-}  // namespace tflite_lattice
-
-#endif  // TENSORFLOW_LATTICE_CC_TFLITE_OPS_HELPERS_H_
diff --git a/tensorflow_lattice/cc/tflite_ops/hypercube_interpolation_test.cc b/tensorflow_lattice/cc/tflite_ops/hypercube_interpolation_test.cc
deleted file mode 100644
index 8649fce..0000000
--- a/tensorflow_lattice/cc/tflite_ops/hypercube_interpolation_test.cc
+++ /dev/null
@@ -1,221 +0,0 @@
-/* Copyright 2018 The TensorFlow Lattice Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/ -#include -#include - -#include -#include "flatbuffers/flexbuffers.h" -#include "tensorflow_lattice/cc/tflite_ops/tflite_ops.h" -#include "tensorflow/lite/interpreter.h" -#include "tensorflow/lite/kernels/register.h" -#include "tensorflow/lite/kernels/test_util.h" -#include "tensorflow/lite/model.h" -#include "tensorflow/lite/string_util.h" - -namespace tflite { -namespace ops { -namespace custom { - - -namespace { - -class HypercubeInterpolationOp : public SingleOpModel { - public: - HypercubeInterpolationOp(const TensorData& input, const TensorData& output, - std::vector lattice_sizes) { - input_ = AddInput(input); - output_ = AddOutput(output); - flexbuffers::Builder fbb; - size_t map_start = fbb.StartMap(); - auto vec_start = fbb.StartVector("lattice_sizes"); - for (int ii = 0; ii < lattice_sizes.size(); ++ii) { - fbb.Add(lattice_sizes[ii]); - } - fbb.EndVector(vec_start, /* typed */ true, /* fixed */ false); - fbb.EndMap(map_start); - fbb.Finish(); - SetCustomOp("HypercubeInterpolation", fbb.GetBuffer(), - Register_HYPERCUBE_INTERPOLATION); - - BuildInterpreter({GetShape(input_)}); - } - - int input() { return input_; } - std::vector GetOutput() { return ExtractVector(output_); } - std::vector GetOutputShape() { return GetTensorShape(output_); } - - private: - int input_; - int output_; -}; - -TEST(Test2D, HypercubeInterpolationTest) { - const float equal_vertex_weight_2d = 0.25; - const int out_row_length = 9; // 3 nodes ^ 2 dimensions - const int n_cells = 4; // (3 nodes - 1) ^ 2 dimensions - HypercubeInterpolationOp m({TensorType_FLOAT32, {n_cells, 2}}, - {TensorType_FLOAT32, {}}, {3, 3}); - m.PopulateTensor(m.input(), { - 0.5, 0.5, - 0.5, 1.5, - 1.5, 0.5, - 1.5, 1.5, - }); - m.Invoke(); - std::vector out(out_row_length * n_cells, 0.0); - int non_zero_indices[n_cells][4] = { - {0, 1, 3, 4}, - {3, 4, 6, 7}, - {1, 2, 4, 5}, - {4, 5, 7, 8}, - }; - int row_offset; - for (int ii = 0; ii < n_cells; ii++) { - for (int ij : non_zero_indices[ii]) { - row_offset = ii * out_row_length; - out[row_offset + ij] = equal_vertex_weight_2d; - } - } - EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(out, 1e-3))); -} - -TEST(Test3D, HypercubeInterpolationTest) { - const float equal_vertex_weight_3d = 0.125; - const int out_row_length = 27; // 3 nodes ^ 3 dimensions - const int n_cells = 8; // (3 nodes - 1) ^ 3 dimensions - const int tier_stride = 9; // 3 nodes ^ 2 dimensions - HypercubeInterpolationOp m( - {TensorType_FLOAT32, {n_cells, 3}}, {TensorType_FLOAT32, {}}, {3, 3, 3}); - m.PopulateTensor(m.input(), { - 0.5, 0.5, 0.5, - 0.5, 1.5, 0.5, - 1.5, 0.5, 0.5, - 1.5, 1.5, 0.5, - 0.5, 0.5, 1.5, - 0.5, 1.5, 1.5, - 1.5, 0.5, 1.5, - 1.5, 1.5, 1.5, - }); - m.Invoke(); - std::vector out(out_row_length * n_cells, 0.0); - // the 3D lattice of 9 cells is a stack of 2 'tiers' of 3x3 (4 cell) latice - // the non-zero entries follow the same pattern as they did for the 2D case - // except that the value must be propagated vertically to 8 vertices of a cube - // instead of the 4 vertices of a square. Also, the 2D must be iterated over - // twice, once for each tier, but both will have the same 2D projection. - int non_zero_indices[n_cells / 2][4] = { - {0, 1, 3, 4}, - {3, 4, 6, 7}, - {1, 2, 4, 5}, - {4, 5, 7, 8}, - }; - int row_offset; - int tier_offset; - int on_tier; - for (int ii = 0; ii < n_cells; ++ii) { - row_offset = ii * out_row_length; - on_tier = ii < 4 ? 
0 : 1; - for (int tier = 0; tier < 2; ++tier) { - tier_offset = (on_tier + tier) * tier_stride; - for (int ij : non_zero_indices[ii % 4]) { - out[row_offset + tier_offset + ij] = equal_vertex_weight_3d; - } - } - } - EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(out, 1e-3))); -} - -TEST(ThreeDoubleLattice, HypercubeInterpolationTest) { - HypercubeInterpolationOp m({TensorType_FLOAT32, {8, 1}}, - {TensorType_FLOAT32, {}}, {3}); - m.PopulateTensor(m.input(), { - -1.0, - 0.0, - 0.2, - 0.8, - 1.0, - 1.3, - 2.0, - 2.5, - }); - m.Invoke(); - std::vector out = { - 1.0, 0.0, 0.0, - 1.0, 0.0, 0.0, - 0.8, 0.2, 0.0, - 0.2, 0.8, 0.0, - 0.0, 1.0, 0.0, - 0.0, 0.7, 0.3, - 0.0, 0.0, 1.0, - 0.0, 0.0, 1.0, - }; - - EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(out, 1e-3))); -} - -TEST(TwoByTwoFloatLattice, HypercubeInterpolationTest) { - HypercubeInterpolationOp m({TensorType_FLOAT32, {7, 2}}, - {TensorType_FLOAT32, {}}, {2, 2}); - m.PopulateTensor(m.input(), { - 0.0, 0.0, - 0.0, 1.0, - 1.0, 0.0, - 1.0, 1.0, - 0.5, 0.5, - 0.2, 0.8, - 0.2, 0.3, - }); - m.Invoke(); - std::vector out = { - 1.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 1.0, 0.0, - 0.0, 1.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 1.0, - 0.25, 0.25, 0.25, 0.25, - 0.16, 0.04, 0.64, 0.16, - 0.56, 0.14, 0.24, 0.06, - }; - - EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(out, 1e-3))); -} - -TEST(TestOOB, HypercubeInterpolationTest) { - HypercubeInterpolationOp m({TensorType_FLOAT32, {3, 2}}, - {TensorType_FLOAT32, {}}, {2, 2}); - m.PopulateTensor(m.input(), { - 0.0, 3.0, - 1.4, .1, - 1.0, 1.0, - }); - m.Invoke(); - std::vector out = { - 0.0, 0.0, 1.0, 0.0, - 0.0, 0.9, 0.0, 0.1, - 0.0, 0.0, 0.0, 1.0, - }; - - EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(out, 1e-3))); -} - -} // namespace -} // namespace custom -} // namespace ops -} // namespace tflite - -int main(int argc, char** argv) { - ::tflite::LogToStderr(); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/tensorflow_lattice/cc/tflite_ops/interpolation.cc b/tensorflow_lattice/cc/tflite_ops/interpolation.cc deleted file mode 100644 index ccc7f05..0000000 --- a/tensorflow_lattice/cc/tflite_ops/interpolation.cc +++ /dev/null @@ -1,254 +0,0 @@ -/* Copyright 2018 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -// tf-lite op corresponding to hypercube_interpolation op defined by tf-lattice - -#include -#include "flatbuffers/flexbuffers.h" -#include "tensorflow_lattice/cc/tflite_ops/helpers.h" -#include "tensorflow/lite/context.h" -#include "tensorflow/lite/kernels/internal/tensor.h" -#include "tensorflow/lite/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace custom { -namespace interpolation { - -using tflite_lattice::BottomCornerIndexAndResidual; -using tflite_lattice::GetBottomCornerIndexAndResidual; -using tflite_lattice::InterpolationParams; - - -// See tensorflow_lattice/cc/kernels/hypercube_interpolation_kernels.cc -// for an in depth explanation of this routine and further references. -// Edge behavior is inherited from the tensorflow op, which specifies that -// points out of bounds are clipped to nearest cell boundary. -void ComputeInterpolationWeightsHyper(const std::vector& lattice_sizes, - int num_vertices_per_cell, - const std::vector& strides, - const float* input_row, - float* output_row) { - int dimension = lattice_sizes.size(); - std::vector indices(num_vertices_per_cell); - std::vector weights(num_vertices_per_cell); - - const BottomCornerIndexAndResidual index_and_residual = - GetBottomCornerIndexAndResidual(lattice_sizes, input_row, strides); - const std::vector& residual = - index_and_residual.residual; - - indices[0] = index_and_residual.bottom_corner_index; - weights[0] = 1.0; - int current_highest_dimension = 0; - float current_residual_value = residual[current_highest_dimension]; - for (int i = 1; i < num_vertices_per_cell; ++i) { - // Make sure that we're within the bounds of the unit hypercube. - TFLITE_DCHECK_GE(current_residual_value, 0); - TFLITE_DCHECK_LE(current_residual_value, 1); - // Sanity check: current_highest_dimension has better respect the bounds. - TFLITE_DCHECK_GE(current_highest_dimension, 0); - TFLITE_DCHECK_LT(current_highest_dimension, dimension); - const int earlier_i = i ^ (1 << current_highest_dimension); - indices[i] = indices[earlier_i] + strides[current_highest_dimension]; - weights[i] = weights[earlier_i] * current_residual_value; - weights[earlier_i] *= (1.0 - current_residual_value); - - if ((i & (i + 1)) == 0) { - // If i + 1 is power of 2, then current_highest_dimension has changed, - // that means, that we are processing next dimension. - ++current_highest_dimension; - if (dimension >= current_highest_dimension + 1) { - current_residual_value = residual[current_highest_dimension]; - } - } - } - // initialize output tensor to zeros - // This is the number of vertices, which is the length of the output rows, - // See Init for more context - const int row_size = strides[dimension - 1] * lattice_sizes[dimension - 1]; - for (int i = 0; i < row_size; ++i) { - output_row[i] = 0.0; - } - for (int jj = 0; jj < indices.size(); ++jj) { - output_row[indices[jj]] = weights[jj]; - } -} - -// Returns the permutation such that -// values[permutation[0]] >= ... >= values[permutation[d - 1]] where -// d == values.size(). 
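-// For example, values = {0.3, 0.9, 0.1} gives permutation = {1, 0, 2}.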
-std::vector DescendingPermutation(const std::vector& values) { - std::vector permutation(values.size()); - std::iota(permutation.begin(), permutation.end(), 0); - - auto cmp = [&values](const int left, const int right) -> bool { - return values[left] > values[right]; - }; - std::sort(permutation.begin(), permutation.end(), cmp); - return permutation; -} - -// This function is adapted from ComputeInterpolationWeights in -// tensorflow_lattice/cc/kernels/simplex_interpolation_kernels.cc, -// see there for a detailed exposition. -// Produces simplex interpolation weights for an input that is in the unit -// hypercube (the residual), as well as the corresponding indices in the lattice -// (based on the bottom_corner). See http://jmlr.org/papers/v17/15-243.html for -// more details. -void ComputeInterpolationWeightsSimplex(const std::vector& lattice_sizes, - int num_vertices_per_cell, - const std::vector& strides, - const float* input_row, - float* output_row) { - int dimension = lattice_sizes.size(); - - const BottomCornerIndexAndResidual bottom_corner_index_and_residual = - GetBottomCornerIndexAndResidual(lattice_sizes, input_row, strides); - const std::vector& residual = - bottom_corner_index_and_residual.residual; - - const std::vector descending_permutation = - DescendingPermutation(residual); - - const int input_dim = dimension; - // interpolation weight contains upto d + 1 non-zero elements. - // Number of non-zero weights. - const int max_nonzero = input_dim + 1; - std::vector indices(max_nonzero); - std::vector weights(max_nonzero); - - float current_residual = 1.0; - int current_index = bottom_corner_index_and_residual.bottom_corner_index; - for (int i = 0; i < input_dim; ++i) { - const int current_dim = descending_permutation[i]; - const float next_residual = residual[current_dim]; - // Assigning index and weight. - indices[i] = current_index; - weights[i] = current_residual - next_residual; - // Proceed to the next item. - current_index += strides[current_dim]; - current_residual = next_residual; - } - // The boundary case. 
- indices[input_dim] = current_index; - weights[input_dim] = current_residual; - - // initialize output tensor to zeros - // This is the number of vertices, which is the length of the output rows, - // See Init for more context - const int row_size = strides[dimension - 1] * lattice_sizes[dimension - 1]; - for (int i = 0; i < row_size; ++i) { - output_row[i] = 0.0; - } - for (int j = 0; j < indices.size(); ++j) { - output_row[indices[j]] = weights[j]; - } -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - auto* data = new InterpolationParams; - const uint8_t* buffer_t = reinterpret_cast(buffer); - const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap(); - auto sizes = m["lattice_sizes"].AsTypedVector(); - data->dimension = sizes.size(); - for (int i = 0; i < data->dimension; ++i) { - data->lattice_sizes.push_back(sizes[i].AsInt64()); - } - data->strides.resize(data->dimension); - data->num_vertices = 1; - for (int i = 0; i < data->dimension; ++i) { - data->strides[i] = data->num_vertices; - data->num_vertices *= data->lattice_sizes[i]; - } - data->num_vertices_per_cell = 1 << data->dimension; - return data; -} - -void Free(TfLiteContext* context, void* buffer) { - delete reinterpret_cast(buffer); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TfLiteTensor* output = GetOutput(context, node, 0); - - const auto* params = - reinterpret_cast(node->user_data); - const TfLiteTensor* input = GetInput(context, node, 0); - // output tensor shape is number of input rows x number of vertices - TfLiteIntArray* output_size = TfLiteIntArrayCreate(2); - output_size->data[0] = input->dims->data[0]; - output_size->data[1] = params->num_vertices; - TF_LITE_ENSURE_OK(context, - context->ResizeTensor(context, output, output_size)); - - return kTfLiteOk; -} - -using WeightCalculator = const std::function&, - int, - const std::vector&, - const float*, - float* -)>; - -TfLiteStatus Eval( - TfLiteContext* context, TfLiteNode* node, - WeightCalculator compute_weights_f) { - const auto* params = - reinterpret_cast(node->user_data); - - const TfLiteTensor* input = GetInput(context, node, 0); - const float* input_flat = GetTensorData(input); - TfLiteTensor* output = GetOutput(context, node, 0); - float* output_flat = GetTensorData(output); - - for (int row_i = 0; row_i < input->dims->data[0]; ++row_i) { - const float* input_row = input_flat + row_i * input->dims->data[1]; - float* output_row = output_flat + row_i * params->num_vertices; - compute_weights_f(params->lattice_sizes, - params->num_vertices_per_cell, params->strides, input_row, - output_row); - } - return kTfLiteOk; -} - -TfLiteStatus EvalHyper(TfLiteContext* context, TfLiteNode* node) { - return Eval(context, node, ComputeInterpolationWeightsHyper); -} - -TfLiteStatus EvalSimplex(TfLiteContext* context, TfLiteNode* node) { - return Eval(context, node, ComputeInterpolationWeightsSimplex); -} - -} // namespace interpolation - -TfLiteRegistration* Register_HYPERCUBE_INTERPOLATION() { - static TfLiteRegistration r = {interpolation::Init, interpolation::Free, - interpolation::Prepare, - interpolation::EvalHyper}; - return &r; -} - -TfLiteRegistration* Register_SIMPLEX_INTERPOLATION() { - static TfLiteRegistration r = {interpolation::Init, interpolation::Free, - interpolation::Prepare, - interpolation::EvalSimplex}; - return &r; -} - -} // namespace custom -} // namespace ops -} // namespace tflite diff --git a/tensorflow_lattice/cc/tflite_ops/pwl_indexing_calibrator.cc 
b/tensorflow_lattice/cc/tflite_ops/pwl_indexing_calibrator.cc deleted file mode 100644 index bd431af..0000000 --- a/tensorflow_lattice/cc/tflite_ops/pwl_indexing_calibrator.cc +++ /dev/null @@ -1,231 +0,0 @@ -/* Copyright 2018 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -// tf-lite op corresponding to PwlIndexingCalibrator op defined by tf-lattice - -#include "flatbuffers/flexbuffers.h" -#include "tensorflow_lattice/cc/tflite_ops/tflite_ops.h" -#include "tensorflow/lite/context.h" -#include "tensorflow/lite/kernels/internal/tensor.h" -#include "tensorflow/lite/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace custom { -namespace pwl_indexing_calibrator { - -// Typically the two nearest keypoints are returned for interpolation. -// If the input coincides with a keypoint, then only that one is returned. -// If the input is outside the keypoint range, then only the nearest keypoint -// is returned. -constexpr int kMaxNumInterpolationPoints = 2; - -template -struct InterpolationPoints { - int num_points; - int64_t lower_index; - Dtype weights[kMaxNumInterpolationPoints]; -}; - - - -// Gets the index of an element in a flat representation given its row and col -inline int Get2DIndex(int n_cols, int row, int col) { - return n_cols * row + col; -} - -// Find the interpolation points, but _not the weights_, for the given -// uncalibrated value and keypoints inputs (kp_inputs). -// The interpolation will be between kp_inputs[lower_index] and -// kp_inputs[lower_index + 1]. Except outside the edges or if x (uncalibrated) -// is exactly on top of a keypoint, in which case the function returns 1 point. -// It uses a simple binary-search, so it is O(log(|kp_inputs|)). -template -InterpolationPoints FindInterpolationPoints(const Dtype uncalibrated, - const float* kp_inputs, - int num_kp) { - if (uncalibrated <= kp_inputs[0]) { - return InterpolationPoints{1, 0}; - } - if (uncalibrated >= kp_inputs[num_kp - 1]) { - return InterpolationPoints{1, num_kp - 1}; - } - - // Binary search the keypoints inputs. - int64_t min_idx = 0, max_idx = num_kp; - while (max_idx > min_idx + 1) { - const int64_t idx = (max_idx + min_idx) / 2; - const float value = kp_inputs[idx]; - if (uncalibrated == value) { - return InterpolationPoints{1, idx}; - } - if (uncalibrated > value) { - min_idx = idx; - } else { - max_idx = idx; - } - } - - // Two points, where lower_index is min_idx. - return InterpolationPoints{2, min_idx}; -} - -// Find interpolations points and associated weights for the given -// uncalibrated value and keypoints inputs (kp_inputs). -// Returns 1 interpolation point if uncalibrated is exactly on top of an -// input keypoint (or if beyond the edges), or 2 if in between two -// keypoints. -// See FindInterpolationPoints. -template -InterpolationPoints FindInterpolationPointsWithWeights( - const Dtype uncalibrated, const float* kp_inputs, int num_kp) { - // Get points an calculates weights. 
- InterpolationPoints interpolation_points = - FindInterpolationPoints(uncalibrated, kp_inputs, num_kp); - if (interpolation_points.num_points == 1) { - // All weight goes to the exact one keypoint where the uncalibrated value - // lies. - interpolation_points.weights[0] = 1.0; - return interpolation_points; - } - const Dtype delta = kp_inputs[interpolation_points.lower_index + 1] - - kp_inputs[interpolation_points.lower_index]; - interpolation_points.weights[1] = - (uncalibrated - kp_inputs[interpolation_points.lower_index]) / delta; - interpolation_points.weights[0] = 1.0 - interpolation_points.weights[1]; - return interpolation_points; -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TfLiteTensor* output = GetOutput(context, node, 0); - const TfLiteTensor* input = GetInput(context, node, 0); - TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); - const TfLiteTensor* kp_inputs = GetInput(context, node, 1); - TF_LITE_ENSURE_EQ(context, NumDimensions(kp_inputs), 1); - // output tensor shape is number of input rows x number of vertices - TfLiteIntArray* output_size = TfLiteIntArrayCreate(2); - output_size->data[0] = SizeOfDimension(input, 0); - output_size->data[1] = SizeOfDimension(kp_inputs, 0); - TF_LITE_ENSURE_OK(context, - context->ResizeTensor(context, output, output_size)); - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input = GetInput(context, node, 0); - const float* input_flat = GetTensorData(input); - - const TfLiteTensor* kp_inputs = GetInput(context, node, 1); - const float* kp_inputs_flat = GetTensorData(kp_inputs); - - TfLiteTensor* output = GetOutput(context, node, 0); - float* output_flat = GetTensorData(output); - - for (int row = 0; row < SizeOfDimension(input, 0); ++row) { - const float uncalibrated = input_flat[row]; - InterpolationPoints pts = FindInterpolationPointsWithWeights( - uncalibrated, kp_inputs_flat, SizeOfDimension(kp_inputs, 0)); - float* output_row = output_flat + row * SizeOfDimension(kp_inputs, 0); - for (int i = 0; i < SizeOfDimension(kp_inputs, 0); ++i) { - output_row[i] = 0.0; - } - for (int k = 0; k < pts.num_points; ++k) { - output_row[pts.lower_index + k] = pts.weights[k]; - } - } - return kTfLiteOk; -} - -TfLiteStatus Prepare_Sparse(TfLiteContext* context, TfLiteNode* node) { - TfLiteTensor* indices_output = GetOutput(context, node, 0); - TfLiteTensor* weights_output = GetOutput(context, node, 1); - SetTensorToDynamic(indices_output); - SetTensorToDynamic(weights_output); - weights_output->type = kTfLiteFloat32; - indices_output->type = kTfLiteInt32; - return kTfLiteOk; -} - -TfLiteStatus Eval_Sparse(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input = GetInput(context, node, 0); - const float* input_flat = GetTensorData(input); - - const TfLiteTensor* kp_inputs = GetInput(context, node, 1); - const float* kp_inputs_flat = GetTensorData(kp_inputs); - - TfLiteTensor* indices_output = GetOutput(context, node, 0); - TfLiteIntArray* indices_output_size = TfLiteIntArrayCreate(2); - indices_output_size->data[0] = - kMaxNumInterpolationPoints * SizeOfDimension(input, 0); - indices_output_size->data[1] = 2; - TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, indices_output, - indices_output_size)); - int* indices_output_flat = GetTensorData(indices_output); - - TfLiteTensor* weights_output = GetOutput(context, node, 1); - TfLiteIntArray* weights_output_size = TfLiteIntArrayCreate(1); - weights_output_size->data[0] = - 
kMaxNumInterpolationPoints * SizeOfDimension(input, 0); - TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, weights_output, - weights_output_size)); - float* weights_output_flat = GetTensorData(weights_output); - - int current_output_row = 0; - for (int row = 0; row < SizeOfDimension(input, 0); ++row) { - const float uncalibrated = input_flat[row]; - InterpolationPoints pts = FindInterpolationPointsWithWeights( - uncalibrated, kp_inputs_flat, SizeOfDimension(kp_inputs, 0)); - for (int i = 0; i < pts.num_points; ++i) { - weights_output_flat[current_output_row] = pts.weights[i]; - indices_output_flat[Get2DIndex(2, current_output_row, 0)] = row; - indices_output_flat[Get2DIndex(2, current_output_row, 1)] = - pts.lower_index + i; - ++current_output_row; - } - } - - TfLiteIntArray* indices_output_size_ = TfLiteIntArrayCreate(2); - indices_output_size_->data[0] = current_output_row; - indices_output_size_->data[1] = 2; - TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, indices_output, - indices_output_size_)); - - TfLiteIntArray* weights_output_size_ = TfLiteIntArrayCreate(1); - weights_output_size_->data[0] = current_output_row; - TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, weights_output, - weights_output_size_)); - - return kTfLiteOk; -} - -} // namespace pwl_indexing_calibrator - -TfLiteRegistration* Register_PWL_INDEXING_CALIBRATOR() { - static TfLiteRegistration r = {nullptr, nullptr, - pwl_indexing_calibrator::Prepare, - pwl_indexing_calibrator::Eval}; - return &r; -} - -TfLiteRegistration* Register_PWL_INDEXING_CALIBRATOR_SPARSE() { - static TfLiteRegistration r = {nullptr, nullptr, - pwl_indexing_calibrator::Prepare_Sparse, - pwl_indexing_calibrator::Eval_Sparse}; - return &r; -} - -} // namespace custom -} // namespace ops -} // namespace tflite diff --git a/tensorflow_lattice/cc/tflite_ops/pwl_indexing_calibrator_sparse_test.cc b/tensorflow_lattice/cc/tflite_ops/pwl_indexing_calibrator_sparse_test.cc deleted file mode 100644 index 752018d..0000000 --- a/tensorflow_lattice/cc/tflite_ops/pwl_indexing_calibrator_sparse_test.cc +++ /dev/null @@ -1,157 +0,0 @@ -/* Copyright 2018 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
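For reference alongside the sparse and dense tests that follow, the keypoint lookup that `FindInterpolationPointsWithWeights` above implements can be sketched in plain Python. This is an illustrative re-implementation (a hypothetical `pwl_interpolation_points` using `bisect` instead of the hand-rolled binary search), not the shipped kernel; it follows the same conventions: out-of-range inputs and exact keypoint hits produce a single weight of 1.0.

```python
import bisect


def pwl_interpolation_points(x, keypoints):
  """Returns (index of first interpolation keypoint, list of 1 or 2 weights)."""
  if x <= keypoints[0]:
    return 0, [1.0]  # clipped to the leftmost keypoint
  if x >= keypoints[-1]:
    return len(keypoints) - 1, [1.0]  # clipped to the rightmost keypoint
  hi = bisect.bisect_left(keypoints, x)  # first keypoint >= x, O(log n)
  if keypoints[hi] == x:
    return hi, [1.0]  # exact hit: all weight on a single keypoint
  lo = hi - 1
  w_hi = (x - keypoints[lo]) / (keypoints[hi] - keypoints[lo])
  return lo, [1.0 - w_hi, w_hi]


keypoints = [0.0, 20.0, 40.0, 60.0, 80.0, 100.0]
print(pwl_interpolation_points(35.0, keypoints))   # (1, [0.25, 0.75])
print(pwl_interpolation_points(80.0, keypoints))   # (4, [1.0])
print(pwl_interpolation_points(-10.0, keypoints))  # (0, [1.0])
```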
-==============================================================================*/ -#include -#include - -#include -#include "flatbuffers/flexbuffers.h" -#include "tensorflow_lattice/cc/tflite_ops/tflite_ops.h" -#include "tensorflow/lite/interpreter.h" -#include "tensorflow/lite/kernels/register.h" -#include "tensorflow/lite/kernels/test_util.h" -#include "tensorflow/lite/model.h" -#include "tensorflow/lite/string_util.h" - -namespace tflite { -namespace ops { -namespace custom { - -namespace { - -class PWLIndexingCalibratorSparseOp : public SingleOpModel { - public: - PWLIndexingCalibratorSparseOp(const TensorData& input, - const TensorData& kp_inputs, - const TensorData& indices_output, - const TensorData& weights_output) { - input_ = AddInput(input); - kp_inputs_ = AddInput(kp_inputs); - indices_output_ = AddOutput(indices_output); - weights_output_ = AddOutput(weights_output); - SetCustomOp("PWLIndexingCalibratorSparseOp", {}, - Register_PWL_INDEXING_CALIBRATOR_SPARSE); - - BuildInterpreter({GetShape(input_), GetShape(kp_inputs_)}); - } - - int input() { return input_; } - int kp_inputs() { return kp_inputs_; } - std::vector GetIndicesOutput() { - return ExtractVector(indices_output_); - } - std::vector GetIndicesOutputShape() { - return GetTensorShape(indices_output_); - } - std::vector GetWeightsOutput() { - return ExtractVector(weights_output_); - } - std::vector GetWeightsOutputShape() { - return GetTensorShape(weights_output_); - } - - private: - int input_; - int kp_inputs_; - int indices_output_; - int weights_output_; -}; - -TEST(TestBasic, PWLIndexingCalibratorSparseTest) { - PWLIndexingCalibratorSparseOp m( - {TensorType_FLOAT32, {6}}, {TensorType_FLOAT32, {4}}, - {TensorType_INT32, {}}, {TensorType_FLOAT32, {}}); - m.PopulateTensor(m.input(), {-3.0, 0.0, 0.1, 0.5, 0.75, 2.0}); - m.PopulateTensor(m.kp_inputs(), {0.0, 0.25, 0.5, 0.75}); - m.Invoke(); - std::vector indices_out = { - 0, 0, - 1, 0, - 2, 0, - 2, 1, - 3, 2, - 4, 3, - 5, 3, - }; - std::vector weights_out = { - 1.0, - 1.0, - 0.6, - 0.4, - 1.0, - 1.0, - 1.0, - }; - EXPECT_THAT(m.GetIndicesOutput(), testing::ElementsAreArray(indices_out)); - EXPECT_THAT(m.GetWeightsOutput(), - ElementsAreArray(ArrayFloatNear(weights_out, 1e-3))); -} - -TEST(TestTF, PWLIndexingCalibratorSparseTest) { - std::vector keypoints = {0.0, 20.0, 40.0, 60.0, 80.0, 100.0}; - struct Test { - std::vector uncalibrated; - std::vector expected_weights; - std::vector expected_indices; - }; - std::vector tests{ - // Bounded min. - {{-10.0}, {1.0}, {0}}, - - // Bounded max. - {{200.0}, {1.0}, {5}}, - - // Exact match. - {{80.0}, {1.0}, {4}}, - - // Interpolated examples. 
- {{10.0}, {0.5, 0.5}, {0, 1}}, - {{35.0}, {0.25, 0.75}, {1, 2}}, - }; - - for (auto &test : tests) { - PWLIndexingCalibratorSparseOp m( - {TensorType_FLOAT32, {(int)test.uncalibrated.size()}}, - {TensorType_FLOAT32, {(int)keypoints.size()}}, {TensorType_FLOAT32, {}}, - {TensorType_FLOAT32, {}}); - m.PopulateTensor( - m.input(), 0, test.uncalibrated.data(), - test.uncalibrated.data() + test.uncalibrated.size()); - m.PopulateTensor(m.kp_inputs(), 0, keypoints.data(), - keypoints.data() + keypoints.size()); - m.Invoke(); - std::vector indices_out(2 * test.expected_weights.size()); - std::vector weights_out(test.expected_weights.size()); - for (int kk = 0; kk < test.expected_weights.size(); ++kk) { - int indices_out_row_offset = 2 * kk; - indices_out[indices_out_row_offset] = 0; - indices_out[indices_out_row_offset + 1] = test.expected_indices[kk]; - weights_out[kk] = test.expected_weights[kk]; - } - EXPECT_THAT(m.GetIndicesOutput(), testing::ElementsAreArray(indices_out)); - EXPECT_THAT(m.GetWeightsOutput(), - ElementsAreArray(ArrayFloatNear(weights_out, 1e-3))); - } -} - -} // namespace -} // namespace custom -} // namespace ops -} // namespace tflite - -int main(int argc, char** argv) { - ::tflite::LogToStderr(); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/tensorflow_lattice/cc/tflite_ops/pwl_indexing_calibrator_test.cc b/tensorflow_lattice/cc/tflite_ops/pwl_indexing_calibrator_test.cc deleted file mode 100644 index 2696433..0000000 --- a/tensorflow_lattice/cc/tflite_ops/pwl_indexing_calibrator_test.cc +++ /dev/null @@ -1,143 +0,0 @@ -/* Copyright 2018 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
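The sparse variant exercised above flattens those per-row results into one `(row, keypoint_index)` pair per non-zero weight, which is the natural feed for a sparse matmul. A sketch of that flattening, reusing the hypothetical `pwl_interpolation_points` helper from the previous snippet (both illustrative, not the shipped op):

```python
def pwl_sparse_outputs(xs, keypoints):
  """Returns ([[row, keypoint_index], ...], [weight, ...]) for a batch."""
  indices, weights = [], []
  for row, x in enumerate(xs):
    lower, row_weights = pwl_interpolation_points(x, keypoints)
    for k, weight in enumerate(row_weights):
      indices.append([row, lower + k])  # one entry per non-zero weight
      weights.append(weight)
  return indices, weights


# Matches the TestBasic sparse expectations above:
indices, weights = pwl_sparse_outputs(
    [-3.0, 0.0, 0.1, 0.5, 0.75, 2.0], [0.0, 0.25, 0.5, 0.75])
print(indices)  # [[0, 0], [1, 0], [2, 0], [2, 1], [3, 2], [4, 3], [5, 3]]
print(weights)  # [1.0, 1.0, 0.6, 0.4, 1.0, 1.0, 1.0]
```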
-==============================================================================*/ -#include -#include - -#include -#include "flatbuffers/flexbuffers.h" -#include "tensorflow_lattice/cc/tflite_ops/tflite_ops.h" -#include "tensorflow/lite/interpreter.h" -#include "tensorflow/lite/kernels/register.h" -#include "tensorflow/lite/kernels/test_util.h" -#include "tensorflow/lite/model.h" -#include "tensorflow/lite/string_util.h" - -namespace tflite { -namespace ops { -namespace custom { - -namespace { - -class PWLIndexingCalibratorOp : public SingleOpModel { - public: - PWLIndexingCalibratorOp(const TensorData& input, const TensorData& kp_inputs, - const TensorData& output) { - input_ = AddInput(input); - kp_inputs_ = AddInput(kp_inputs); - output_ = AddOutput(output); - SetCustomOp("PWLIndexingCalibratorOp", {}, - Register_PWL_INDEXING_CALIBRATOR); - - BuildInterpreter({GetShape(input_), GetShape(kp_inputs_)}); - } - - int input() { return input_; } - int kp_inputs() { return kp_inputs_; } - std::vector GetOutput() { return ExtractVector(output_); } - std::vector GetOutputShape() { return GetTensorShape(output_); } - - private: - int input_; - int kp_inputs_; - int output_; -}; - -TEST(TestBasic, PWLIndexingCalibratorTest) { - PWLIndexingCalibratorOp m({TensorType_FLOAT32, {6}}, - {TensorType_FLOAT32, {4}}, - {TensorType_FLOAT32, {}}); - m.PopulateTensor(m.input(), {-3.0, 0.0, 0.1, 0.5, 0.75, 2.0}); - m.PopulateTensor(m.kp_inputs(), {0.0, 0.25, 0.5, 0.75}); - m.Invoke(); - std::vector out = { - 1.0, 0.0, 0.0, 0.0, - 1.0, 0.0, 0.0, 0.0, - 0.6, 0.4, 0.0, 0.0, - 0.0, 0.0, 1.0, 0.0, - 0.0, 0.0, 0.0, 1.0, - 0.0, 0.0, 0.0, 1.0, - }; - EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(out, 1e-3))); -} - -TEST(TestSingle, PWLIndexingCalibratorTest) { - PWLIndexingCalibratorOp m({TensorType_FLOAT32, {3}}, - {TensorType_FLOAT32, {1}}, - {TensorType_FLOAT32, {}}); - m.PopulateTensor(m.input(), {-1.0, 0.0, 1.1}); - m.PopulateTensor(m.kp_inputs(), {0.0}); - m.Invoke(); - std::vector out = { - 1.0, - 1.0, - 1.0, - }; - EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(out, 1e-3))); -} - -TEST(TestTF, PWLIndexingCalibratorTest) { - std::vector keypoints = {0.0, 20.0, 40.0, 60.0, 80.0, 100.0}; - struct Test { - std::vector uncalibrated; - std::vector expected_weights; - std::vector expected_indices; - }; - std::vector tests{ - // Bounded min. - {{-10.0}, {1.0, 0.0}, {0, 1}}, - - // Bounded max. - {{200.0}, {0.0, 1.0}, {4, 5}}, - - // Exact match. - {{80.0}, {0.0, 1.0, 0.0}, {3, 4, 5}}, - - // Interpolated examples. 
- {{10.0}, {0.5, 0.5}, {0, 1}}, - {{35.0}, {0.25, 0.75}, {1, 2}}, - }; - - for (auto& test : tests) { - PWLIndexingCalibratorOp m( - {TensorType_FLOAT32, {(int)test.uncalibrated.size()}}, - {TensorType_FLOAT32, {(int)keypoints.size()}}, - {TensorType_FLOAT32, {}}); - m.PopulateTensor( - m.input(), 0, test.uncalibrated.data(), - test.uncalibrated.data() + test.uncalibrated.size()); - m.PopulateTensor(m.kp_inputs(), 0, keypoints.data(), - keypoints.data() + keypoints.size()); - m.Invoke(); - std::vector out(keypoints.size() * test.uncalibrated.size()); - for (int ii = 0; ii < out.size(); ++ii) { - out[ii] = 0.0; - } - for (int kk = 0; kk < test.expected_weights.size(); ++kk) { - out[test.expected_indices[kk]] = test.expected_weights[kk]; - } - EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(out, 1e-3))); - } -} - -} // namespace -} // namespace custom -} // namespace ops -} // namespace tflite - -int main(int argc, char** argv) { - ::tflite::LogToStderr(); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/tensorflow_lattice/cc/tflite_ops/simplex_interpolation_test.cc b/tensorflow_lattice/cc/tflite_ops/simplex_interpolation_test.cc deleted file mode 100644 index 6e8c215..0000000 --- a/tensorflow_lattice/cc/tflite_ops/simplex_interpolation_test.cc +++ /dev/null @@ -1,118 +0,0 @@ -/* Copyright 2018 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#include -#include - -#include -#include "flatbuffers/flexbuffers.h" -#include "tensorflow_lattice/cc/tflite_ops/tflite_ops.h" -#include "tensorflow/lite/interpreter.h" -#include "tensorflow/lite/kernels/register.h" -#include "tensorflow/lite/kernels/test_util.h" -#include "tensorflow/lite/model.h" -#include "tensorflow/lite/string_util.h" - -namespace tflite { -namespace ops { -namespace custom { - - -namespace { - -class SimplexInterpolationOp : public SingleOpModel { - public: - SimplexInterpolationOp(const TensorData& input, const TensorData& output, - std::vector lattice_sizes) { - input_ = AddInput(input); - output_ = AddOutput(output); - flexbuffers::Builder fbb; - size_t map_start = fbb.StartMap(); - auto vec_start = fbb.StartVector("lattice_sizes"); - for (int i = 0; i < lattice_sizes.size(); ++i) { - fbb.Add(lattice_sizes[i]); - } - fbb.EndVector(vec_start, /* typed */ true, /* fixed */ false); - fbb.EndMap(map_start); - fbb.Finish(); - SetCustomOp("SimplexInterpolation", fbb.GetBuffer(), - Register_SIMPLEX_INTERPOLATION); - - BuildInterpreter({GetShape(input_)}); - } - - int input() { return input_; } - std::vector GetOutput() { return ExtractVector(output_); } - std::vector GetOutputShape() { return GetTensorShape(output_); } - - private: - int input_; - int output_; -}; - -TEST(Test1D, SimplexInterpolationTest) { - SimplexInterpolationOp m({TensorType_FLOAT32, {8, 1}}, - {TensorType_FLOAT32, {}}, {3}); - m.PopulateTensor(m.input(), { - -1.0, 0.0, 0.2, 0.8, 1.0, 1.3, 2.0, 2.5 - }); - m.Invoke(); - std::vector out = { - 1.0, 0.0, 0.0, - 1.0, 0.0, 0.0, - 0.8, 0.2, 0.0, - 0.2, 0.8, 0.0, - 0.0, 1.0, 0.0, - 0.0, 0.7, 0.3, - 0.0, 0.0, 1.0, - 0.0, 0.0, 1.0 - }; - EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(out, 1e-3))); -} - -TEST(Test2D, SimplexInterpolationTest) { - SimplexInterpolationOp m({TensorType_FLOAT32, {7, 2}}, - {TensorType_FLOAT32, {}}, {2, 2}); - m.PopulateTensor(m.input(), { - 0.0, 0.0, - 0.0, 1.0, - 1.0, 0.0, - 1.0, 1.0, - 0.5, 0.5, - 0.2, 0.8, - 0.2, 0.3 - }); - m.Invoke(); - std::vector out = { - 1.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 1.0, 0.0, - 0.0, 1.0, 0.0, 0.0, - 0.0, 0.0, 0.0, 1.0, - 0.5, 0.0, 0.0, 0.5, - 0.2, 0.0, 0.6, 0.2, - 0.7, 0.0, 0.1, 0.2 - }; - EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(out, 1e-3))); -} - -} // namespace -} // namespace custom -} // namespace ops -} // namespace tflite - -int main(int argc, char** argv) { - ::tflite::LogToStderr(); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/tensorflow_lattice/cc/tflite_ops/tflite_ops.cc b/tensorflow_lattice/cc/tflite_ops/tflite_ops.cc deleted file mode 100644 index bfe0181..0000000 --- a/tensorflow_lattice/cc/tflite_ops/tflite_ops.cc +++ /dev/null @@ -1,33 +0,0 @@ -/* Copyright 2018 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
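The simplex weights asserted above differ from the hypercube ones only in how the cell is decomposed: the residual coordinates are visited in descending order and consecutive differences become the weights, so at most d + 1 of them are non-zero. An illustrative NumPy sketch (hypothetical `simplex_weights`, not the shipped kernel), sharing the clipping convention of the hypercube sketch earlier:

```python
import numpy as np


def simplex_weights(x, lattice_sizes):
  """Returns one interpolation weight per lattice vertex for a single input."""
  sizes = np.asarray(lattice_sizes)
  x = np.clip(np.asarray(x, dtype=float), 0.0, sizes - 1.0)
  bottom = np.minimum(np.floor(x), sizes - 2).astype(int)
  residual = x - bottom
  strides = np.cumprod(np.concatenate(([1], sizes[:-1]))).astype(int)
  weights = np.zeros(int(np.prod(sizes)))
  index = int(np.dot(bottom, strides))  # start at the bottom cell corner
  current = 1.0
  for dim in np.argsort(-residual):  # dimensions by descending residual
    weights[index] = current - residual[dim]
    index += strides[dim]  # walk one edge of the simplex per dimension
    current = residual[dim]
  weights[index] = current  # the boundary case: the last simplex vertex
  return weights


# Matches the Test2D expectation for input (0.2, 0.3):
print(simplex_weights([0.2, 0.3], [2, 2]))  # [0.7 0.  0.1 0.2]
```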
-==============================================================================*/ -#include "tensorflow_lattice/cc/tflite_ops/tflite_ops.h" -#include "tensorflow/lite/context.h" -#include "tensorflow/lite/op_resolver.h" - -namespace tflite { - -void RegisterTfLatticeOps(MutableOpResolver* resolver) { - resolver->AddCustom("HypercubeInterpolation", - tflite::ops::custom::Register_HYPERCUBE_INTERPOLATION()); - resolver->AddCustom("SimplexInterpolation", - tflite::ops::custom::Register_SIMPLEX_INTERPOLATION()); - resolver->AddCustom("PWLIndexingCalibration", - tflite::ops::custom::Register_PWL_INDEXING_CALIBRATOR()); - resolver->AddCustom( - "PWLIndexingCalibrationSparse", - tflite::ops::custom::Register_PWL_INDEXING_CALIBRATOR_SPARSE()); -} - -} // namespace tflite diff --git a/tensorflow_lattice/cc/tflite_ops/tflite_ops.h b/tensorflow_lattice/cc/tflite_ops/tflite_ops.h deleted file mode 100644 index c1b6830..0000000 --- a/tensorflow_lattice/cc/tflite_ops/tflite_ops.h +++ /dev/null @@ -1,41 +0,0 @@ -/* Copyright 2018 The TensorFlow Lattice Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LATTICE_CC_TFLITE_OPS_TFLITE_OPS_H_ -#define TENSORFLOW_LATTICE_CC_TFLITE_OPS_TFLITE_OPS_H_ -#include "tensorflow/lite/context.h" -#include "tensorflow/lite/op_resolver.h" - -// This file provides declarations and utilities useful for consumers of TF-Lite -// ops in TF-Lattice project. In particular, there are headers for registration -// functions for each op, as well as a function that performs the registration. -namespace tflite { -namespace ops { -namespace custom { - -TfLiteRegistration* Register_HYPERCUBE_INTERPOLATION(); -TfLiteRegistration* Register_SIMPLEX_INTERPOLATION(); -TfLiteRegistration* Register_PWL_INDEXING_CALIBRATOR(); -TfLiteRegistration* Register_PWL_INDEXING_CALIBRATOR_SPARSE(); - -} // namespace custom -} // namespace ops - -// Registers the custom ops so that tflite interpreter can find them. Must be -// called by clients that intend to use these ops. -void RegisterTfLatticeOps(MutableOpResolver* resolver); - -} // namespace tflite - -#endif // TENSORFLOW_LATTICE_CC_TFLITE_OPS_TFLITE_OPS_H_ diff --git a/tensorflow_lattice/cc/tflite_ops/toco_wrapper.py b/tensorflow_lattice/cc/tflite_ops/toco_wrapper.py deleted file mode 100644 index 1335e50..0000000 --- a/tensorflow_lattice/cc/tflite_ops/toco_wrapper.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2018 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Runs TOCO tflite_converter after importing tensorflow_lattice ops.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import sys - -# Dependency imports -# tensorflow_lattice must be imported in order for tensorflow to recognize its -# custom ops, which is necessary for toco to find them -import tensorflow_lattice # pylint: disable=unused-import -from tensorflow.lite.python import tflite_convert - - -def main(): - return tflite_convert.app.run(main=tflite_convert.run_main, argv=sys.argv[:1]) - - -if __name__ == '__main__': - main() diff --git a/tensorflow_lattice/python/estimators/__init__.py b/tensorflow_lattice/layers/__init__.py similarity index 55% rename from tensorflow_lattice/python/estimators/__init__.py rename to tensorflow_lattice/layers/__init__.py index 2e5e266..1d36250 100644 --- a/tensorflow_lattice/python/estimators/__init__.py +++ b/tensorflow_lattice/layers/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2017 The TensorFlow Lattice Authors. +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,9 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# ============================================================================== -"""TensorFlow Lattice tf.estimators.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function +"""'layers' namespace for TFL layers.""" + +from tensorflow_lattice.python.categorical_calibration_layer import CategoricalCalibration +from tensorflow_lattice.python.lattice_layer import Lattice +from tensorflow_lattice.python.linear_layer import Linear +from tensorflow_lattice.python.parallel_combination_layer import ParallelCombination +from tensorflow_lattice.python.pwl_calibration_layer import PWLCalibration diff --git a/tensorflow_lattice/python/BUILD b/tensorflow_lattice/python/BUILD index a1fa44b..6a17db2 100644 --- a/tensorflow_lattice/python/BUILD +++ b/tensorflow_lattice/python/BUILD @@ -1,4 +1,4 @@ -# Copyright 2017 The TensorFlow Lattice Authors. +# Copyright 2019 The TensorFlow Lattice Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -licenses(["notice"]) # Apache 2.0 License package( default_visibility = [ @@ -20,248 +19,297 @@ package( ], ) -exports_files(["LICENSE"]) +licenses(["notice"]) -# All python tests can run under python 2 and 3. 
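The rename above establishes a `layers` namespace, so the Keras layers added in this change are importable as a group. Below is a minimal hedged end-to-end sketch, assuming the `tensorflow_lattice` package is installed; the training data and hyperparameters are illustrative only, while the constructor arguments and the `assert_constraints` call mirror the `CategoricalCalibration` code later in this change.

```python
import numpy as np
import tensorflow as tf
from tensorflow_lattice import layers as tfl_layers

# Constructor arguments mirror the CategoricalCalibration docstring later in
# this change: three buckets, bounded output, output(0) <= output(1), output(2).
calibrator = tfl_layers.CategoricalCalibration(
    num_buckets=3,
    units=1,
    output_min=0.0,
    output_max=1.0,
    monotonicities=[(0, 1), (0, 2)])
model = tf.keras.Sequential([calibrator])
model.compile(loss="mse", optimizer=tf.keras.optimizers.Adam(0.1))

# Illustrative data only: bucket ids in, targets consistent with constraints.
x = np.array([[0], [1], [2]] * 100, dtype=np.int32)
y = np.array([[0.1], [0.9], [0.5]] * 100, dtype=np.float32)
model.fit(x, y, batch_size=30, epochs=5, verbose=0)

calibrator.assert_constraints(eps=1e-4)  # eager mode executes the assertions
print(calibrator.kernel.numpy())  # per-bucket outputs, bounded and monotonic
```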
-load( - "//tensorflow_lattice:tensorflow_lattice.bzl", - "rpath_linkopts", -) -load( - "@org_tensorflow//tensorflow:tensorflow.bzl", - "tf_gen_op_wrapper_py", +py_library( + name = "pwl_calibration_layer", + srcs = ["pwl_calibration_layer.py"], + srcs_version = "PY2AND3", + deps = [ + ":pwl_calibration_lib", + # absl/logging dep, + # tensorflow:tensorflow_no_contrib dep, + ], ) -load("@org_tensorflow//tensorflow:tensorflow.bzl", "tf_custom_op_py_library") -tf_gen_op_wrapper_py( - name = "pwl_indexing_calibrator_py_wrapper", - out = "ops/gen_pwl_indexing_calibrator.py", - cc_linkopts = rpath_linkopts("pwl_indexing_calibrator_py_wrapper"), - hidden = [], +py_library( + name = "pwl_calibration_lib", + srcs = ["pwl_calibration_lib.py"], + srcs_version = "PY2AND3", deps = [ - "//tensorflow_lattice/cc:pwl_indexing_calibrator_ops_op_lib", + # enum dep, + # tensorflow:tensorflow_no_contrib dep, ], ) -tf_gen_op_wrapper_py( - name = "monotonic_projection_py_wrapper", - out = "ops/gen_monotonic_projection.py", - cc_linkopts = rpath_linkopts("monotonic_projection_py_wrapper"), - hidden = [], +py_test( + name = "pwl_calibration_test", + size = "large", + srcs = ["pwl_calibration_test.py"], + python_version = "PY3", + # shard_count = 12, + srcs_version = "PY2AND3", deps = [ - "//tensorflow_lattice/cc:monotonic_projection_op_op_lib", + ":parallel_combination_layer", + ":pwl_calibration_layer", + ":test_utils", + # absl/logging dep, + # absl/testing:parameterized dep, + # numpy dep, + # tensorflow dep, ], ) -tf_gen_op_wrapper_py( - name = "lattice_interpolation_py_wrapper", - out = "ops/gen_lattice_interpolation.py", - cc_linkopts = rpath_linkopts("lattice_interpolation_py_wrapper"), - hidden = [], - require_shape_functions = True, +py_library( + name = "linear_layer", + srcs = ["linear_layer.py"], + srcs_version = "PY2AND3", deps = [ - "//tensorflow_lattice/cc:lattice_interpolation_ops_op_lib", + ":linear_lib", + # tensorflow:tensorflow_no_contrib dep, ], ) -tf_gen_op_wrapper_py( - name = "monotone_lattice_py_wrapper", - out = "ops/gen_monotone_lattice.py", - cc_linkopts = rpath_linkopts("monotone_lattice_py_wrapper"), - hidden = [], - require_shape_functions = True, +py_library( + name = "linear_lib", + srcs = ["linear_lib.py"], + srcs_version = "PY2AND3", deps = [ - "//tensorflow_lattice/cc:monotone_lattice_ops_op_lib", + ":utils", + # tensorflow:tensorflow_no_contrib dep, ], ) -tf_custom_op_py_library( - name = "lattice_ops_py", - srcs = [ - "ops/lattice_ops.py", - ], - dso = [ - "//tensorflow_lattice/cc:ops/_lattice_ops.so", - ], - kernels = [ - "//tensorflow_lattice/cc:lattice_ops", - "//tensorflow_lattice/cc/kernels:lattice_kernels", - ], +py_library( + name = "categorical_calibration_layer", + srcs = ["categorical_calibration_layer.py"], srcs_version = "PY2AND3", deps = [ - ":lattice_interpolation_py_wrapper", - ":monotone_lattice_py_wrapper", - "@org_tensorflow//tensorflow:tensorflow_py", - "@org_tensorflow//tensorflow/python:framework_for_generated_wrappers", + ":categorical_calibration_lib", + # tensorflow:tensorflow_no_contrib dep, ], ) -tf_custom_op_py_library( - name = "pwl_calibration_ops_py", - srcs = [ - "ops/pwl_calibration_ops.py", +py_library( + name = "categorical_calibration_lib", + srcs = ["categorical_calibration_lib.py"], + srcs_version = "PY2AND3", + deps = [ + ":utils", + # enum dep, + # tensorflow:tensorflow_no_contrib dep, ], - dso = [ - "//tensorflow_lattice/cc:ops/_pwl_calibration_ops.so", +) + +py_test( + name = "categorical_calibration_test", + size = "medium", + timeout 
= "long", + srcs = ["categorical_calibration_test.py"], + python_version = "PY3", + # shard_count = 4, + srcs_version = "PY2AND3", + deps = [ + ":categorical_calibration_layer", + ":parallel_combination_layer", + ":test_utils", + # absl/logging dep, + # absl/testing:parameterized dep, + # numpy dep, + # tensorflow dep, ], - kernels = [ - "//tensorflow_lattice/cc:pwl_calibration_ops", - "//tensorflow_lattice/cc/kernels:pwl_calibration_kernels", +) + +py_test( + name = "linear_test", + size = "large", + srcs = ["linear_test.py"], + python_version = "PY3", + srcs_version = "PY2AND3", + deps = [ + ":linear_layer", + ":test_utils", + # absl/logging dep, + # absl/testing:parameterized dep, + # numpy dep, + # tensorflow dep, ], +) + +py_library( + name = "lattice_layer", + srcs = ["lattice_layer.py"], srcs_version = "PY2AND3", deps = [ - ":monotonic_projection_py_wrapper", - ":pwl_indexing_calibrator_py_wrapper", - "@org_tensorflow//tensorflow:tensorflow_py", - "@org_tensorflow//tensorflow/python:framework_for_generated_wrappers", + ":categorical_calibration_layer", + ":lattice_lib", + ":pwl_calibration_layer", + # tensorflow:tensorflow_no_contrib dep, ], ) py_library( - name = "keypoints_initialization", - srcs = ["lib/keypoints_initialization.py"], + name = "lattice_lib", + srcs = ["lattice_lib.py"], srcs_version = "PY2AND3", deps = [ - ":tools", - "@org_tensorflow//third_party/py/numpy", - "@six_archive//:six", - "@org_tensorflow//tensorflow:tensorflow_py", + # absl/logging dep, + # tensorflow:tensorflow_no_contrib dep, ], ) py_test( - name = "keypoints_initialization_test", - size = "medium", - srcs = ["lib/keypoints_initialization_test.py"], + name = "lattice_test", + size = "large", + srcs = ["lattice_test.py"], + python_version = "PY3", + # shard_count = 12, srcs_version = "PY2AND3", deps = [ - ":keypoints_initialization", - "@absl_py//absl/testing:parameterized", - "@org_tensorflow//tensorflow:tensorflow_py", + ":lattice_layer", + ":test_utils", + # absl/logging dep, + # absl/testing:parameterized dep, + # numpy dep, + # tensorflow dep, ], ) py_library( - name = "pwl_calibration_layers", - srcs = ["lib/pwl_calibration_layers.py"], + name = "parallel_combination_layer", + srcs = ["parallel_combination_layer.py"], srcs_version = "PY2AND3", deps = [ - ":keypoints_initialization", - ":pwl_calibration_ops_py", - ":regularizers", - ":tools", - "@org_tensorflow//tensorflow:tensorflow_py", + ":categorical_calibration_layer", + ":lattice_layer", + ":linear_layer", + ":pwl_calibration_layer", + # tensorflow:tensorflow_no_contrib dep, ], ) py_test( - name = "pwl_calibration_layers_test", - size = "medium", - srcs = ["lib/pwl_calibration_layers_test.py"], + name = "parallel_combination_test", + size = "large", + srcs = ["parallel_combination_test.py"], + python_version = "PY3", srcs_version = "PY2AND3", deps = [ - ":pwl_calibration_layers", - ":tools", - "@org_tensorflow//third_party/py/numpy", - "@org_tensorflow//tensorflow:tensorflow_py", + ":lattice_layer", + ":parallel_combination_layer", + # absl/logging dep, + # absl/testing:parameterized dep, + # numpy dep, + # tensorflow dep, ], ) py_library( - name = "lattice_layers", - srcs = ["lib/lattice_layers.py"], + name = "configs", + srcs = ["configs.py"], srcs_version = "PY2AND3", deps = [ - ":lattice_ops_py", - ":regularizers", - ":tools", - "@org_tensorflow//tensorflow:tensorflow_py", + # absl/logging dep, + # tensorflow dep, ], ) py_test( - name = "lattice_layers_test", - size = "medium", - srcs = ["lib/lattice_layers_test.py"], + name = 
"configs_test", + size = "small", + srcs = ["configs_test.py"], + python_version = "PY3", srcs_version = "PY2AND3", deps = [ - ":lattice_layers", - "@org_tensorflow//tensorflow:tensorflow_py", + ":configs", + # absl/logging dep, + # tensorflow dep, ], ) py_library( - name = "monotone_linear_layers", - srcs = ["lib/monotone_linear_layers.py"], + name = "utils", + srcs = ["utils.py"], srcs_version = "PY2AND3", deps = [ - ":regularizers", - ":tools", - "@org_tensorflow//tensorflow:tensorflow_py", + # tensorflow dep, ], ) py_test( - name = "monotone_linear_layers_test", - size = "medium", - srcs = ["lib/monotone_linear_layers_test.py"], + name = "utils_test", + srcs = ["utils_test.py"], + python_version = "PY3", srcs_version = "PY2AND3", deps = [ - ":monotone_linear_layers", - "@org_tensorflow//tensorflow:tensorflow_py", + ":utils", + # tensorflow dep, ], ) py_library( - name = "regularizers", - srcs = ["lib/regularizers.py"], + name = "test_utils", + srcs = ["test_utils.py"], srcs_version = "PY2AND3", deps = [ - ":tools", - "@org_tensorflow//tensorflow:tensorflow_py", + ":visualization", + # absl/logging dep, + # numpy dep, ], ) -py_test( - name = "regularizers_test", - size = "large", - srcs = ["lib/regularizers_test.py"], +py_library( + name = "visualization", + srcs = ["visualization.py"], srcs_version = "PY2AND3", deps = [ - ":regularizers", - "@org_tensorflow//tensorflow:tensorflow_py", + ":model_info", + # graphviz dep, + # matplotlib dep, + # mpl_toolkits/mplot3d dep, + # numpy dep, ], ) -# TensorFlow Lattice internal libraries. py_library( - name = "tools", - srcs = ["lib/tools.py"], + name = "estimators", + srcs = ["estimators.py"], srcs_version = "PY2AND3", deps = [ - "@org_tensorflow//third_party/py/numpy", - "@org_tensorflow//tensorflow:tensorflow_py", + ":categorical_calibration_layer", + ":configs", + ":lattice_layer", + ":lattice_lib", + ":linear_layer", + ":model_info", + ":pwl_calibration_layer", + ":pwl_calibration_lib", + # absl/logging dep, + # tensorflow dep, ], ) py_test( - name = "tools_test", - size = "medium", - srcs = ["lib/tools_test.py"], + name = "estimators_test", + size = "large", + timeout = "long", + srcs = ["estimators_test.py"], + python_version = "PY3", + # shard_count = 8, srcs_version = "PY2AND3", deps = [ - ":test_data", - ":tools", - "@org_tensorflow//third_party/py/numpy", - "@org_tensorflow//tensorflow:tensorflow_py", + ":configs", + ":estimators", + ":model_info", + # absl/logging dep, + # sklearn dep, + # tensorflow dep, ], ) py_library( - name = "test_data", - srcs = ["lib/test_data.py"], + name = "model_info", + srcs = ["model_info.py"], srcs_version = "PY2AND3", - deps = [ - "@org_tensorflow//tensorflow:tensorflow_py", - ], + deps = [], ) diff --git a/tensorflow_lattice/python/__init__.py b/tensorflow_lattice/python/__init__.py index f9600cf..63e23e0 100644 --- a/tensorflow_lattice/python/__init__.py +++ b/tensorflow_lattice/python/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2017 The TensorFlow Lattice Authors. +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,9 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-# ============================================================================== """TensorFlow Lattice python package.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function + diff --git a/tensorflow_lattice/python/categorical_calibration_layer.py b/tensorflow_lattice/python/categorical_calibration_layer.py new file mode 100644 index 0000000..0ba07d7 --- /dev/null +++ b/tensorflow_lattice/python/categorical_calibration_layer.py @@ -0,0 +1,310 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Categorical calibration layer with monotonicity and bound constraints. + +Keras implementation of the TensorFlow Lattice categorical calibration layer. This +layer takes single or multi-dimensional input and transforms it using lookup +tables satisfying monotonicity and bounds constraints if specified. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from . import categorical_calibration_lib +import tensorflow as tf +from tensorflow import keras + +DEFAULT_INPUT_VALUE_NAME = "default_input_value" +CATEGORICAL_CALIBRATION_KERNEL_NAME = "categorical_calibration_kernel" + +# TODO: implement variation/variance regularizer. + + +class CategoricalCalibration(keras.layers.Layer): + # pyformat: disable + """Categorical calibration layer with monotonicity and bound constraints. + + This layer takes input of shape `(batch_size, units)` or `(batch_size, 1)` and + transforms it using `units` number of lookup tables satisfying monotonicity + and bounds constraints if specified. If multi-dimensional input is provided, + each output will be for the corresponding input, otherwise all calibration + functions will act on the same input. All units share the same layer + configuration, but each one has its own set of trained parameters. + + Input shape: + Rank-2 tensor with shape: `(batch_size, units)` or `(batch_size, 1)`. + + Output shape: + Rank-2 tensor with shape: `(batch_size, units)`. + + Attributes: + - All `__init__` args. + kernel: TF variable of shape `(num_buckets, units)` which stores the lookup + table. + + Example: + + ```python + calibrator = tfl.categorical_calibration_layer.CategoricalCalibration( + # Number of categories. + num_buckets=3, + # Output can be bounded. + output_min=0.0, + output_max=1.0, + # For categorical calibration layer monotonicity is specified for pairs of + # indices of categories. Output for first category in pair will be less + # than or equal to output for second category. + monotonicities=[(0, 1), (0, 2)]) + ``` + + Usage with functional models: + + ```python + input_feature = keras.layers.Input(shape=[1]) + calibrated_feature = tfl.categorical_calibration_layer.CategoricalCalibration( + num_buckets=3, + output_min=0.0, + output_max=1.0, + monotonicities=[(0, 1), (0, 2)], + )(input_feature) + ... + model = keras.models.Model( + inputs=[input_feature, ...], + outputs=...)
+ ``` + """ + # pyformat: enable + + def __init__(self, + num_buckets, + units=1, + output_min=None, + output_max=None, + monotonicities=None, + kernel_initializer="uniform", + kernel_regularizer=None, + default_input_value=None, + **kwargs): + # pyformat: disable + """Initializes a `CategoricalCalibration` instance. + + Args: + num_buckets: Number of categories. + units: Output dimension of the layer. See class comments for details. + output_min: Minimum output of calibrator. + output_max: Maximum output of calibrator. + monotonicities: List of pairs with `(i, j)` indices indicating `output(i)` + should be less than or equal to `output(j)`. + kernel_initializer: None or one of: + - `'uniform'`: If `output_min` and `output_max` are provided initial + values will be uniformly sampled from `[output_min, output_max]` + range. + - `'constant'`: If `output_min` and `output_max` are provided all output + values will be initialized to the constant + `(output_min + output_max) / 2`. + - Any Keras initializer object. + kernel_regularizer: None, a single Keras regularizer object, or a list of + them. + default_input_value: If set, all inputs which are equal to this value will + be treated as default and mapped to the last bucket. + **kwargs: Other args passed to `tf.keras.layers.Layer` initializer. + + Raises: + ValueError: If layer hyperparameters are invalid. + """ + # pyformat: enable + dtype = kwargs.pop("dtype", tf.float32) # output dtype + super(CategoricalCalibration, self).__init__(dtype=dtype, **kwargs) + + categorical_calibration_lib.verify_hyperparameters( + num_buckets=num_buckets, + output_min=output_min, + output_max=output_max, + monotonicities=monotonicities) + self.num_buckets = num_buckets + self.units = units + self.output_min = output_min + self.output_max = output_max + self.monotonicities = monotonicities + if output_min is not None and output_max is not None: + if kernel_initializer == "constant": + kernel_initializer = keras.initializers.Constant( + (output_min + output_max) / 2) + elif kernel_initializer == "uniform": + kernel_initializer = keras.initializers.RandomUniform( + output_min, output_max) + self.kernel_initializer = keras.initializers.get(kernel_initializer) + self.kernel_regularizer = [] + if kernel_regularizer: + if callable(kernel_regularizer): + kernel_regularizer = [kernel_regularizer] + for reg in kernel_regularizer: + self.kernel_regularizer.append(keras.regularizers.get(reg)) + self.default_input_value = default_input_value + + def build(self, input_shape): + """Standard Keras build() method.""" + if (self.output_min is not None or self.output_max is not None or + self.monotonicities): + constraints = CategoricalCalibrationConstraints( + output_min=self.output_min, + output_max=self.output_max, + monotonicities=self.monotonicities) + else: + constraints = None + + if not self.kernel_regularizer: + kernel_reg = None + elif len(self.kernel_regularizer) == 1: + kernel_reg = self.kernel_regularizer[0] + else: + # The Keras interface assumes a single regularizer, so sum all the + # regularization losses that we have. + kernel_reg = lambda x: tf.add_n([r(x) for r in self.kernel_regularizer]) + + # The categorical calibration layer kernel is a num_buckets x units matrix + # with output(i) = self.kernel[i]. The default value maps to the last index.
+ self.kernel = self.add_weight( + CATEGORICAL_CALIBRATION_KERNEL_NAME, + shape=[self.num_buckets, self.units], + initializer=self.kernel_initializer, + regularizer=kernel_reg, + constraint=constraints, + dtype=self.dtype) + + if self.kernel_regularizer and not tf.executing_eagerly(): + # Keras has its own mechanism to handle regularization losses which + # does not use GraphKeys, but we want to also add losses to graph keys so + # they are easily accessable when layer is being used outside of Keras. + # Adding losses to GraphKeys will not interfer with Keras. + for reg in self.kernel_regularizer: + tf.compat.v1.add_to_collection( + tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES, reg(self.kernel)) + + super(CategoricalCalibration, self).build(input_shape) + + def call(self, inputs): + """Standard Keras call() method.""" + if self.default_input_value is not None: + default_input_value_tensor = tf.constant( + self.default_input_value, + name=DEFAULT_INPUT_VALUE_NAME, + dtype=inputs.dtype) + replacement = tf.zeros_like(inputs) + (self.num_buckets - 1) + inputs = tf.where( + tf.equal(inputs, default_input_value_tensor), replacement, inputs) + + if inputs.dtype not in [tf.uint8, tf.int32, tf.int64]: + inputs = tf.cast(inputs, dtype=tf.int32) + + # We can't use tf.gather_nd(self.kernel, inputs) as it doesn't support + # constraints (constraint functions are not supported for IndexedSlices). + # Instead we use matrix multiplication by one-hot encoding of the index. + if self.units == 1: + # This can be slightly faster as it uses matmul. + return tf.matmul( + tf.one_hot(tf.squeeze(inputs, axis=[-1]), depth=self.num_buckets), + self.kernel) + return tf.reduce_sum( + tf.one_hot(inputs, axis=1, depth=self.num_buckets) * self.kernel, + axis=1) + + def compute_output_shape(self, input_shape): + """Standard Keras compute_output_shape() method.""" + del input_shape + return [None, self.units] + + def get_config(self): + """Standard Keras config for serialization.""" + config = { + "num_buckets": self.num_buckets, + "units": self.units, + "output_min": self.output_min, + "output_max": self.output_max, + "monotonicities": self.monotonicities, + "kernel_initializer": + keras.initializers.serialize(self.kernel_initializer), + "kernel_regularizer": + [keras.regularizers.serialize(r) for r in self.kernel_regularizer], + "default_input_value": self.default_input_value, + } # pyformat: disable + config.update(super(CategoricalCalibration, self).get_config()) + return config + + def assert_constraints(self, eps=1e-6): + """Asserts that layer weights satisfy all constraints. + + In graph mode builds and returns list of assertion ops. Note that ops will + be created at the moment when this function is being called. + In eager mode directly executes assetions. + + Args: + eps: Allowed constraints violation. + + Returns: + List of assertion ops in graph mode or immideately asserts in eager mode. + """ + return categorical_calibration_lib.assert_constraints( + weights=self.kernel, + output_min=self.output_min, + output_max=self.output_max, + monotonicities=self.monotonicities, + eps=eps) + + +class CategoricalCalibrationConstraints(keras.constraints.Constraint): + # pyformat: disable + """Monotonicity and bounds constraints for categorical calibration layer. + + Updates the weights of CategoricalCalibration layer to satify bound and + monotonicity constraints. The update is an approximate L2 projection into the + constrained parameter space. + + Attributes: + - All `__init__` arguments. 
+ """ + # pyformat: enable + + def __init__(self, output_min=None, output_max=None, monotonicities=None): + """Initializes an instance of `CategoricalCalibrationConstraints`. + + Args: + output_min: Minimum possible output of categorical function. + output_max: Maximum possible output of categorical function. + monotonicities: Monotonicities of CategoricalCalibration layer. + """ + categorical_calibration_lib.verify_hyperparameters( + output_min=output_min, + output_max=output_max, + monotonicities=monotonicities) + self.monotonicities = monotonicities + self.output_min = output_min + self.output_max = output_max + + def __call__(self, w): + """Applies constraints to w.""" + return categorical_calibration_lib.project( + weights=w, + output_min=self.output_min, + output_max=self.output_max, + monotonicities=self.monotonicities) + + def get_config(self): + """Standard Keras config for serialization.""" + return { + "output_min": self.output_min, + "output_max": self.output_max, + "monotonicities": self.monotonicities, + } # pyformat: disable diff --git a/tensorflow_lattice/python/categorical_calibration_lib.py b/tensorflow_lattice/python/categorical_calibration_lib.py new file mode 100644 index 0000000..e7de9cc --- /dev/null +++ b/tensorflow_lattice/python/categorical_calibration_lib.py @@ -0,0 +1,161 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Helpers and computations of categorical calibration layer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from . import utils +import tensorflow as tf + + +def project(weights, output_min, output_max, monotonicities): + """Monotonicity/bounds constraints implementation for categorical calibration. + + Returns the approximate L2 projection of the CategoricalCalibration weights + into the constrained parameter space. + + Args: + weights: Tensor which represents weights of Categorical calibration layer. + output_min: Lower bound constraint on weights. + output_max: Upper bound constraint on weights. + monotonicities: List of pair of indices `(i, j)`, indicating constraint + `weight[i] <= weight[j]`. + + Returns: + Projected `weights` tensor. + + Raises: + ValueError: If monotonicities are not of the correct format or are circular. 
+  """
+  num_buckets = weights.shape[0]
+  verify_hyperparameters(
+      num_buckets=num_buckets,
+      output_min=output_min,
+      output_max=output_max,
+      monotonicities=monotonicities)
+
+  projected_weights = weights
+
+  if monotonicities:
+    projected_weights = (
+        utils.approximately_project_categorical_partial_monotonicities(
+            projected_weights, monotonicities))
+
+  if output_min is not None:
+    projected_weights = tf.maximum(projected_weights, output_min)
+  if output_max is not None:
+    projected_weights = tf.minimum(projected_weights, output_max)
+  return projected_weights
+
+
+def assert_constraints(weights,
+                       output_min,
+                       output_max,
+                       monotonicities,
+                       debug_tensors=None,
+                       eps=1e-6):
+  """Asserts that `weights` satisfy constraints.
+
+  Args:
+    weights: Tensor which represents weights of Categorical calibration layer.
+    output_min: Lower bound constraint on weights.
+    output_max: Upper bound constraint on weights.
+    monotonicities: List of pairs of indices `(i, j)`, each indicating the
+      constraint `weight[i] <= weight[j]`.
+    debug_tensors: None or list of anything convertible to tensor (for example
+      tensors or strings) which will be printed in case of constraints
+      violation.
+    eps: Allowed constraints violation.
+
+  Returns:
+    List of assertion ops in graph mode or immediately asserts in eager mode.
+  """
+  num_buckets = weights.shape[0]
+  verify_hyperparameters(
+      num_buckets=num_buckets,
+      output_min=output_min,
+      output_max=output_max,
+      monotonicities=monotonicities)
+
+  info = ["Outputs: ", weights, "Epsilon: ", eps]
+  if debug_tensors:
+    info += debug_tensors
+  asserts = []
+
+  if output_min is not None:
+    min_output = tf.reduce_min(weights)
+    asserts.append(
+        tf.Assert(
+            min_output >= output_min - eps,
+            data=["Lower bound violation.", "output_min:", output_min] + info,
+            summarize=num_buckets))
+
+  if output_max is not None:
+    max_output = tf.reduce_max(weights)
+    asserts.append(
+        tf.Assert(
+            max_output <= output_max + eps,
+            data=["Upper bound violation.", "output_max:", output_max] + info,
+            summarize=num_buckets))
+
+  if monotonicities:
+    left = tf.gather_nd(weights, [[i] for (i, j) in monotonicities])
+    right = tf.gather_nd(weights, [[j] for (i, j) in monotonicities])
+    asserts.append(
+        tf.Assert(
+            # Every pair must satisfy weight[i] <= weight[j] + eps, so the
+            # largest violation across all pairs is what gets checked.
+            tf.reduce_max(left - right) <= eps,
+            data=["Monotonicity violation.", "monotonicities:", monotonicities]
+            + info,
+            summarize=num_buckets))
+
+  return asserts
+
+
+def verify_hyperparameters(num_buckets=None,
+                           output_min=None,
+                           output_max=None,
+                           monotonicities=None):
+  """Verifies that all given hyperparameters are consistent.
+
+  See `tfl.categorical_calibration_layer.CategoricalCalibration` class level
+  comment for details.
+
+  Args:
+    num_buckets: `num_buckets` of CategoricalCalibration layer.
+    output_min: Smallest output of CategoricalCalibration layer.
+    output_max: Largest output of CategoricalCalibration layer.
+    monotonicities: `monotonicities` of CategoricalCalibration layer.
+
+  Raises:
+    ValueError: If parameters are incorrect or inconsistent.
+  """
+  if output_min is not None and output_max is not None:
+    if output_max < output_min:
+      raise ValueError(
+          "If specified, output_max must be greater than output_min. "
+          "They are: ({}, {})".format(output_min, output_max))
+
+  if monotonicities:
+    if (not isinstance(monotonicities, list) or
+        not all(isinstance(m, tuple) and len(m) == 2 for m in monotonicities)):
+      raise ValueError("Monotonicities should be a list of pairs (tuples).")
+    for (i, j) in monotonicities:
+      if (i < 0 or j < 0 or (num_buckets is not None and
+                             (i >= num_buckets or j >= num_buckets))):
+        raise ValueError(
+            "Monotonicities should be pairs of indices in the range "
+            "[0, num_buckets). They are: {}".format(monotonicities))
diff --git a/tensorflow_lattice/python/categorical_calibration_test.py b/tensorflow_lattice/python/categorical_calibration_test.py
new file mode 100644
index 0000000..3ca51fe
--- /dev/null
+++ b/tensorflow_lattice/python/categorical_calibration_test.py
@@ -0,0 +1,316 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tests for categorical calibration layer.
+
+This test should be run with "-c opt" since otherwise it's slow.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from absl import logging
+from absl.testing import parameterized
+import numpy as np
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow_lattice.python import categorical_calibration_layer as categorical_calibration
+from tensorflow_lattice.python import parallel_combination_layer as parallel_combination
+from tensorflow_lattice.python import test_utils
+
+
+class CategoricalCalibrationLayerTest(parameterized.TestCase, tf.test.TestCase):
+
+  def setUp(self):
+    self._disable_all = False
+    self._loss_eps = 1e-2
+    self._loss_diff_eps = 1e-4
+    super(CategoricalCalibrationLayerTest, self).setUp()
+
+  def _ResetAllBackends(self):
+    keras.backend.clear_session()
+    tf.compat.v1.reset_default_graph()
+
+  def _ScatterXUniformly(self, units, num_points, num_buckets,
+                         missing_probability, default_input_value):
+    """Randomly uniformly scatters points across input space."""
+    data = []
+    for unit_idx in range(units):
+      if missing_probability > 0.0:
+        missing_points = int(num_points * missing_probability)
+      else:
+        missing_points = 0
+
+      x = ([default_input_value for _ in range(missing_points)] +
+           [i % num_buckets for i in range(num_points - missing_points)])
+      np.random.seed(unit_idx)
+      np.random.shuffle(x)
+      if data:
+        data = [values + (value,) for values, value in zip(data, x)]
+      else:
+        data = [(value,) for value in x]
+
+    return [np.asarray(v, dtype=np.int32) for v in data]
+
+  def _SetDefaults(self, config):
+    config.setdefault("units", 1)
+    config.setdefault("use_multi_calibration_layer", False)
+    config.setdefault("one_d_input", False)
+    config.setdefault("output_min", None)
+    config.setdefault("output_max", None)
+    config.setdefault("default_input_value", None)
+    config.setdefault("monotonicities", None)
+    config.setdefault("missing_probability", 0.0)
+    config.setdefault("constraint_assertion_eps", 1e-6)
+    config.setdefault("kernel_regularizer", None)
+    config.setdefault("model_dir", "/tmp/test_pwl_model_dir/")
+
+    return config
+
+  def _TrainModel(self, config, plot_path=None):
+    """Trains model and returns loss.
+
+    Args:
+      config: Layer config internal for this test which specifies params of
+        the categorical calibration layer to train.
+      plot_path: if specified - png file name to save visualization. See
+        test_utils.run_training_loop() for more details.
+
+    Returns:
+      Training loss.
+    """
+    logging.info("Testing config:")
+    logging.info(config)
+    config = self._SetDefaults(config)
+
+    self._ResetAllBackends()
+
+    if config["default_input_value"] is not None:
+      # default_input_value is mapped to the last bucket, hence x_generator
+      # needs to generate in [0, ..., num_buckets-2] range.
+      num_random_buckets = config["num_buckets"] - 1
+    else:
+      num_random_buckets = config["num_buckets"]
+
+    # The input to the model can either be single or multi dimensional.
+    input_units = 1 if config["one_d_input"] else config["units"]
+
+    training_inputs = config["x_generator"](
+        units=input_units,
+        num_points=config["num_training_records"],
+        num_buckets=num_random_buckets,
+        missing_probability=config["missing_probability"],
+        default_input_value=config["default_input_value"])
+    training_labels = [config["y_function"](x) for x in training_inputs]
+
+    # Either create multiple CategoricalCalibration layers and combine using a
+    # ParallelCombination layer, or create a single CategoricalCalibration with
+    # multiple output dimensions.
+    if config["use_multi_calibration_layer"]:
+      num_calibration_layers = config["units"]
+      categorical_calibration_units = 1
+    else:
+      num_calibration_layers = 1
+      categorical_calibration_units = config["units"]
+
+    model = keras.models.Sequential()
+    model.add(keras.layers.Input(shape=[input_units], dtype=tf.int32))
+    calibration_layers = []
+    for _ in range(num_calibration_layers):
+      calibration_layers.append(
+          categorical_calibration.CategoricalCalibration(
+              units=categorical_calibration_units,
+              kernel_initializer="constant",
+              num_buckets=config["num_buckets"],
+              output_min=config["output_min"],
+              output_max=config["output_max"],
+              monotonicities=config["monotonicities"],
+              kernel_regularizer=config["kernel_regularizer"],
+              default_input_value=config["default_input_value"]))
+    if len(calibration_layers) == 1:
+      model.add(calibration_layers[0])
+    else:
+      model.add(parallel_combination.ParallelCombination(calibration_layers))
+    if config["units"] > 1:
+      model.add(keras.layers.Lambda(
+          lambda x: tf.reduce_mean(x, axis=1, keepdims=True)))
+    model.compile(
+        loss=keras.losses.mean_squared_error,
+        optimizer=config["optimizer"](learning_rate=config["learning_rate"]))
+
+    training_data = (training_inputs, training_labels, training_inputs)
+
+    loss = test_utils.run_training_loop(
+        config=config,
+        training_data=training_data,
+        keras_model=model,
+        plot_path=plot_path,
+        input_dtype=np.int32)
+
+    assertion_ops = []
+    for calibration_layer in calibration_layers:
+      assertion_ops.extend(
+          calibration_layer.assert_constraints(
+              eps=config["constraint_assertion_eps"]))
+    if not tf.executing_eagerly() and assertion_ops:
+      tf.compat.v1.keras.backend.get_session().run(assertion_ops)
+
+    return loss
+
+  @parameterized.parameters((np.mean,), (lambda x: -np.mean(x),))
+  def testUnconstrainedNoMissingValue(self, y_function):
+    if self._disable_all:
+      return
+    config = {
+        "num_training_records": 200,
+        "num_training_epoch": 500,
+        "optimizer": tf.keras.optimizers.Adam,
+        "learning_rate": 0.15,
+        "x_generator": self._ScatterXUniformly,
+        "y_function": y_function,
+        "num_buckets": 10,
+        "output_min": None,
+        "output_max": None,
+        "monotonicities": None,
+    }
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, 0.0, delta=self._loss_eps)
+    config["units"] = 3
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, 0.0, delta=self._loss_eps)
+    config["one_d_input"] = True
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, 0.0, delta=self._loss_eps)
+
+  @parameterized.parameters((np.mean,), (lambda x: -np.mean(x),))
+  def testUnconstrainedWithMissingValue(self, y_function):
+    if self._disable_all:
+      return
+    config = {
+        "num_training_records": 200,
+        "num_training_epoch": 500,
+        "optimizer": tf.keras.optimizers.Adam,
+        "learning_rate": 0.15,
+        "x_generator": self._ScatterXUniformly,
+        "y_function": y_function,
+        "num_buckets": 10,
+        "output_min": None,
+        "output_max": None,
+        "monotonicities": None,
+        "default_input_value": -1,
+        "missing_probability": 0.1,
+    }
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, 0.0, delta=self._loss_eps)
+    config["units"] = 3
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, 0.0, delta=self._loss_eps)
+    config["one_d_input"] = True
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, 0.0, delta=self._loss_eps)
+
+  @parameterized.parameters(
+      (0.0, 9.0, None, 0.0),
+      (1.0, 8.0, None, 0.2),
+      (1.0, 8.0, [(6, 5)], 0.25),
+      (1.0, 8.0, [(6, 5), (5, 4)], 0.4),
+      (1.0, 8.0, [(6, 5), (7, 5)], 0.4),
+      (1.0, 8.0, [(6, 5), (5, 4), (4, 3)], 0.7),
+      (1.0, 8.0, [(7, 6), (6, 5), (4, 3), (3, 2)], 0.6),
+      (1.0, 8.0, [(7, 6), (6, 5), (5, 4), (4, 3), (3, 2)], 1.95),
+  )
+  def testConstraints(self, output_min, output_max, monotonicities,
+                      expected_loss):
+    if self._disable_all:
+      return
+    config = {
+        "num_training_records": 1000,
+        "num_training_epoch": 1000,
+        "optimizer": tf.keras.optimizers.Adam,
+        "learning_rate": 1.0,
+        "x_generator": self._ScatterXUniformly,
+        "y_function": np.mean,
+        "num_buckets": 10,
+        "output_min": output_min,
+        "output_max": output_max,
+        "monotonicities": monotonicities,
+    }
+
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps)
+
+    # Same input with multiple calibration units should give the same loss.
+    config["units"] = 3
+    config["one_d_input"] = True
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps)
+
+    # With independently sampled unit-dim inputs the loss is scaled by 1/units.
+    config["one_d_input"] = False
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(
+        loss,
+        expected_loss / config["units"],
+        delta=self._loss_eps * config["units"])
+
+    # Using separate calibration layers should give the same loss.
+ config["use_multi_calibration_layer"] = True + loss_multi_calib = self._TrainModel(config) + self.assertAlmostEqual(loss, loss_multi_calib, delta=self._loss_diff_eps) + + def testCircularMonotonicites(self): + if self._disable_all: + return + config = { + "num_training_records": 200, + "num_training_epoch": 500, + "optimizer": tf.keras.optimizers.Adam, + "learning_rate": 0.15, + "x_generator": self._ScatterXUniformly, + "y_function": float, + "num_buckets": 5, + "monotonicities": [(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)], + } + + with self.assertRaises(ValueError): + self._TrainModel(config) + + @parameterized.parameters( + # Standard Keras regularizer: + (keras.regularizers.l1_l2(l1=0.01, l2=0.001),), + # Tuple of regularizers: + ((keras.regularizers.l1_l2(l1=0.01, l2=0.0), + keras.regularizers.l1_l2(l1=0.0, l2=0.001)),), + ) + def testRegularizers(self, regularizer): + if self._disable_all: + return + config = { + "num_training_records": 20, + "num_training_epoch": 0, + "optimizer": tf.keras.optimizers.Adam, + "learning_rate": 1.0, + "x_generator": self._ScatterXUniformly, + "y_function": lambda _: 2.0, + "kernel_regularizer": regularizer, + "num_buckets": 3, + "output_min": 0.0, + "output_max": 4.0, + } + loss = self._TrainModel(config) + # This loss is pure regularization loss because initializer matches target + # function and there was 0 training epochs. + self.assertAlmostEqual(loss, 0.072, delta=self._loss_eps) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensorflow_lattice/python/configs.py b/tensorflow_lattice/python/configs.py new file mode 100644 index 0000000..03eacef --- /dev/null +++ b/tensorflow_lattice/python/configs.py @@ -0,0 +1,841 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""TFL model configuration library for canned estimators. + +To construct a TFL canned estimator, construct a model configuration and pass +it to the canned estimator constructor: + +```python +feature_columns = ... +model_config = tfl.configs.CalibratedLatticeConfig(...) +feature_analysis_input_fn = create_input_fn(num_epochs=1, ...) +train_input_fn = create_input_fn(num_epochs=100, ...) +estimator = tfl.estimators.CannedClassifier( + feature_columns=feature_columns, + model_config=model_config, + feature_analysis_input_fn=feature_analysis_input_fn) +estimator.train(input_fn=train_input_fn) +``` + +Supported models are: + +* **Calibrated linear model**: Constructed using + `tfl.configs.CalibratedLinearConfig`. + A calibrated linear model that applies piecewise-linear and categorical + calibration on the input feature, followed by a linear combination and an + optional output piecewise-linear calibration. When using output calibration + or when output bounds are specified, the linear layer will apply weighted + averaging on calibrated inputs. + +* **Calibrated lattice model**: Constructed using + `tfl.configs.CalibratedLatticeConfig`. 
+
+  A calibrated lattice model applies piecewise-linear and categorical
+  calibration on the input feature, followed by a lattice model and an
+  optional output piecewise-linear calibration.
+
+* **Calibrated lattice ensemble model**: Constructed using
+  `tfl.configs.CalibratedLatticeEnsembleConfig`.
+  A calibrated lattice ensemble model applies piecewise-linear and categorical
+  calibration on the input feature, followed by an ensemble of lattice models
+  and an optional output piecewise-linear calibration.
+
+Feature calibration and per-feature configurations are set using
+`tfl.configs.FeatureConfig`. Feature configurations include monotonicity
+constraints, per-feature regularization (see `tfl.configs.RegularizerConfig`),
+and lattice sizes for lattice models.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+
+from absl import logging
+
+_HPARAM_FEATURE_PREFIX = 'feature'
+_HPARAM_REGULARIZER_PREFIX = 'regularizer'
+
+
+class _Config(object):
+  """Base class for configs."""
+
+  def __init__(self, kwargs):
+    if 'self' in kwargs:
+      kwargs.pop('self')
+    if '__class__' in kwargs:
+      kwargs.pop('__class__')
+    self.__dict__ = kwargs
+
+  def __repr__(self):
+    return self.__dict__.__repr__()
+
+
+class _HasFeatureConfigs(object):
+  """Base class for configs with `feature_configs` attribute."""
+
+  def feature_config_by_name(self, feature_name):
+    """Returns existing or default FeatureConfig with the given name."""
+    if self.feature_configs is None:
+      self.feature_configs = []
+    for feature_config in self.feature_configs:
+      if feature_config.name == feature_name:
+        return feature_config
+    feature_config = FeatureConfig(feature_name)
+    self.feature_configs.append(feature_config)
+    return feature_config
+
+
+class _HasRegularizerConfigs(object):
+  """Base class for configs with `regularizer_configs` attribute."""
+
+  def regularizer_config_by_name(self, regularizer_name):
+    """Returns existing or default RegularizerConfig with the given name."""
+    if self.regularizer_configs is None:
+      self.regularizer_configs = []
+    for regularizer_config in self.regularizer_configs:
+      if regularizer_config.name == regularizer_name:
+        return regularizer_config
+    regularizer_config = RegularizerConfig(regularizer_name)
+    self.regularizer_configs.append(regularizer_config)
+    return regularizer_config
+
+
+# pylint: disable=unused-argument
+
+
+class CalibratedLatticeEnsembleConfig(_Config, _HasFeatureConfigs,
+                                      _HasRegularizerConfigs):
+  """Config for calibrated lattice ensemble model.
+
+  A calibrated lattice ensemble model applies piecewise-linear and categorical
+  calibration on the input feature, followed by an ensemble of lattice models
+  and an optional output piecewise-linear calibration.
+
+  The ensemble structure can be one of the following, set via the `lattices`
+  flag:
+
+  - Explicit list of lists of features specifying the features used in each
+    submodel.
+  - A random arrangement (also called Random Tiny Lattices, or RTL).
+  - Crystals growing algorithm: This algorithm first constructs a prefitting
+    model to assess pairwise interactions between features, and then uses
+    those estimates to construct a final model that puts interacting
+    features in the same lattice. For details see "Fast and flexible monotonic
+    functions with ensembles of lattices", Advances in Neural Information
+    Processing Systems, 2016.
+
+  Examples:
+
+  Creating a random ensemble (RTL) model:
+
+  ```python
+  model_config = tfl.configs.CalibratedLatticeEnsembleConfig(
+      num_lattices=6,  # number of lattices
+      lattice_rank=5,  # number of features in each lattice
+      feature_configs=[...],
+  )
+  feature_analysis_input_fn = create_input_fn(num_epochs=1, ...)
+  train_input_fn = create_input_fn(num_epochs=100, ...)
+  estimator = tfl.estimators.CannedClassifier(
+      feature_columns=feature_columns,
+      model_config=model_config,
+      feature_analysis_input_fn=feature_analysis_input_fn)
+  estimator.train(input_fn=train_input_fn)
+  ```
+
+  To create a Crystals model, you will need to provide a *prefitting_input_fn*
+  to the estimator constructor. This input_fn is used to train the prefitting
+  model, as described above. The prefitting model does not need to be fully
+  trained, so a few epochs should be enough.
+
+  ```python
+  model_config = tfl.configs.CalibratedLatticeEnsembleConfig(
+      lattices='crystals',  # feature arrangement method
+      num_lattices=6,  # number of lattices
+      lattice_rank=5,  # number of features in each lattice
+      feature_configs=[...],
+  )
+  feature_analysis_input_fn = create_input_fn(num_epochs=1, ...)
+  prefitting_input_fn = create_input_fn(num_epochs=5, ...)
+  train_input_fn = create_input_fn(num_epochs=100, ...)
+  estimator = tfl.estimators.CannedClassifier(
+      feature_columns=feature_columns,
+      model_config=model_config,
+      feature_analysis_input_fn=feature_analysis_input_fn,
+      prefitting_input_fn=prefitting_input_fn)
+  estimator.train(input_fn=train_input_fn)
+  ```
+  """
+  _HAS_DYNAMIC_ATTRIBUTES = True  # Required for pytype checks.
+
+  def __init__(self,
+               feature_configs=None,
+               lattices='random',
+               num_lattices=None,
+               lattice_rank=None,
+               separate_calibrators=True,
+               regularizer_configs=None,
+               output_min=None,
+               output_max=None,
+               output_calibration=False,
+               output_calibration_num_keypoint=10,
+               output_initialization='quantiles',
+               fix_ensemble_for_2d_constraints=True,
+               random_seed=0):
+    # pyformat: disable
+    """Initializes a `CalibratedLatticeEnsembleConfig` instance.
+
+    Args:
+      feature_configs: A list of `tfl.configs.FeatureConfig` instances that
+        specify configurations for each feature. If a configuration is not
+        provided for a feature, a default configuration will be used.
+      lattices: Should be one of the following:
+        - String `'random'` indicating that the features in each lattice should
+          be selected randomly
+        - String `'crystals'` to use a heuristic to construct the lattice
+          ensemble based on pairwise feature interactions
+        - An explicit list of lists of feature names to be used in each lattice
+          in the ensemble.
+      num_lattices: Number of lattices in the ensemble. Must be provided if
+        lattices are not explicitly provided.
+      lattice_rank: Number of features in each lattice. Must be provided if
+        lattices are not explicitly provided.
+      separate_calibrators: If features should be separately calibrated for
+        each lattice in the ensemble.
+      regularizer_configs: A list of `tfl.configs.RegularizerConfig` instances
+        that apply global regularization.
+      output_min: Lower bound constraint on the output of the model.
+      output_max: Upper bound constraint on the output of the model.
+      output_calibration: If a piecewise-linear calibration should be used on
+        the output of the lattice.
+      output_calibration_num_keypoint: Number of keypoints to use for the
+        output piecewise-linear calibration.
+      output_initialization: The initial values to set up for the output of the
+        model. When using output calibration, these values are used to
+        initialize the output keypoints of the output piecewise-linear
+        calibration. Otherwise the lattice parameters will be set up to form a
+        linear function in the range of output_initialization. It can be one
+        of:
+        - String `'quantiles'`: Output is initialized to label quantiles, if
+          possible.
+        - String `'uniform'`: Output is initialized uniformly in label range.
+        - A list of numbers: To be used for initialization of the output
+          lattice or output calibrator.
+      fix_ensemble_for_2d_constraints: A boolean indicating whether to add
+        missing features to some lattices to resolve potential 2d constraint
+        violations which require lattices in the ensemble to either contain
+        both constrained features or none of them, e.g. trapezoid trust
+        constraint requires a lattice that has the "conditional" feature to
+        include the "main" feature. Note that this might increase the final
+        lattice rank.
+      random_seed: Random seed to use for randomized lattices.
+    """
+    # pyformat: enable
+    super(CalibratedLatticeEnsembleConfig, self).__init__(locals())
+
+
+class CalibratedLatticeConfig(_Config, _HasFeatureConfigs,
+                              _HasRegularizerConfigs):
+  """Config for calibrated lattice model.
+
+  A calibrated lattice model applies piecewise-linear and categorical
+  calibration on the input feature, followed by a lattice model and an
+  optional output piecewise-linear calibration.
+
+  Example:
+
+  ```python
+  model_config = tfl.configs.CalibratedLatticeConfig(
+      feature_configs=[...],
+  )
+  feature_analysis_input_fn = create_input_fn(num_epochs=1, ...)
+  train_input_fn = create_input_fn(num_epochs=100, ...)
+  estimator = tfl.estimators.CannedClassifier(
+      feature_columns=feature_columns,
+      model_config=model_config,
+      feature_analysis_input_fn=feature_analysis_input_fn)
+  estimator.train(input_fn=train_input_fn)
+  ```
+  """
+  _HAS_DYNAMIC_ATTRIBUTES = True  # Required for pytype checks.
+
+  def __init__(self,
+               feature_configs=None,
+               regularizer_configs=None,
+               output_min=None,
+               output_max=None,
+               output_calibration=False,
+               output_calibration_num_keypoint=10,
+               output_initialization='quantiles'):
+    """Initializes a `CalibratedLatticeConfig` instance.
+
+    Args:
+      feature_configs: A list of `tfl.configs.FeatureConfig` instances that
+        specify configurations for each feature. If a configuration is not
+        provided for a feature, a default configuration will be used.
+      regularizer_configs: A list of `tfl.configs.RegularizerConfig` instances
+        that apply global regularization.
+      output_min: Lower bound constraint on the output of the model.
+      output_max: Upper bound constraint on the output of the model.
+      output_calibration: If a piecewise-linear calibration should be used on
+        the output of the lattice.
+      output_calibration_num_keypoint: Number of keypoints to use for the
+        output piecewise-linear calibration.
+      output_initialization: The initial values to set up for the output of the
+        model. When using output calibration, these values are used to
+        initialize the output keypoints of the output piecewise-linear
+        calibration. Otherwise the lattice parameters will be set up to form a
+        linear function in the range of output_initialization. It can be one
+        of:
+        - String `'quantiles'`: Output is initialized to label quantiles, if
+          possible.
+        - String `'uniform'`: Output is initialized uniformly in label range.
+        - A list of numbers: To be used for initialization of the output
+          lattice or output calibrator.
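+
+    A minimal sketch of explicit output initialization (hypothetical values;
+    `feature_configs` elided):
+
+    ```python
+    model_config = CalibratedLatticeConfig(
+        feature_configs=[...],
+        output_min=0.0,
+        output_max=1.0,
+        output_calibration=True,
+        output_initialization=[0.0, 0.5, 1.0])
+    ```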
+    """
+    super(CalibratedLatticeConfig, self).__init__(locals())
+
+
+class CalibratedLinearConfig(_Config, _HasFeatureConfigs,
+                             _HasRegularizerConfigs):
+  """Config for calibrated linear model.
+
+  A calibrated linear model applies piecewise-linear and categorical
+  calibration on the input feature, followed by a linear combination and an
+  optional output piecewise-linear calibration. When using output calibration
+  or when output bounds are specified, the linear layer will apply weighted
+  averaging on calibrated inputs.
+
+  Example:
+
+  ```python
+  model_config = tfl.configs.CalibratedLinearConfig(
+      feature_configs=[...],
+  )
+  feature_analysis_input_fn = create_input_fn(num_epochs=1, ...)
+  train_input_fn = create_input_fn(num_epochs=100, ...)
+  estimator = tfl.estimators.CannedClassifier(
+      feature_columns=feature_columns,
+      model_config=model_config,
+      feature_analysis_input_fn=feature_analysis_input_fn)
+  estimator.train(input_fn=train_input_fn)
+  ```
+  """
+  _HAS_DYNAMIC_ATTRIBUTES = True  # Required for pytype checks.
+
+  def __init__(self,
+               feature_configs=None,
+               regularizer_configs=None,
+               use_bias=True,
+               output_min=None,
+               output_max=None,
+               output_calibration=False,
+               output_calibration_num_keypoint=10,
+               output_initialization='quantiles'):
+    """Initializes a `CalibratedLinearConfig` instance.
+
+    Args:
+      feature_configs: A list of `tfl.configs.FeatureConfig` instances that
+        specify configurations for each feature. If a configuration is not
+        provided for a feature, a default configuration will be used.
+      regularizer_configs: A list of `tfl.configs.RegularizerConfig` instances
+        that apply global regularization.
+      use_bias: If a bias term should be used for the linear combination.
+      output_min: Lower bound constraint on the output of the model.
+      output_max: Upper bound constraint on the output of the model.
+      output_calibration: If a piecewise-linear calibration should be used on
+        the output of the lattice.
+      output_calibration_num_keypoint: Number of keypoints to use for the
+        output piecewise-linear calibration.
+      output_initialization: The initial values to set up for the output of the
+        model. When using output calibration, these values are used to
+        initialize the output keypoints of the output piecewise-linear
+        calibration. Otherwise the lattice parameters will be set up to form a
+        linear function in the range of output_initialization. It can be one
+        of:
+        - String `'quantiles'`: Output is initialized to label quantiles, if
+          possible.
+        - String `'uniform'`: Output is initialized uniformly in label range.
+        - A list of numbers: To be used for initialization of the output
+          lattice or output calibrator.
+    """
+    super(CalibratedLinearConfig, self).__init__(locals())
+
+
+class FeatureConfig(_Config, _HasRegularizerConfigs):
+  """Per-feature configuration for TFL canned estimators.
+
+  A feature can either be numerical or categorical. Numeric features will be
+  calibrated using a piecewise-linear function with the given number of
+  keypoints. Categorical features should have `num_buckets > 0` and a
+  `vocabulary_list` representing their categories. Several of the config
+  fields can be filled in automatically based on the `FeatureColumns` used by
+  the model but can also be provided explicitly. See `__init__` args comments
+  for details.
+
+  Currently only one-dimensional features are supported.
+
+  Examples:
+
+  ```python
+  feature_columns = [
+      tf.feature_column.numeric_column(
+          'age', default_value=-1),
+      tf.feature_column.categorical_column_with_vocabulary_list(
+          'thal', vocabulary_list=['normal', 'fixed', 'reversible']),
+      ...
+  ]
+
+  model_config = tfl.configs.CalibratedLatticeConfig(
+      feature_configs=[
+          tfl.configs.FeatureConfig(
+              name='age',
+              lattice_size=3,
+              # Monotonically increasing.
+              monotonicity='increasing',
+              # Per feature regularization.
+              regularizer_configs=[
+                  tfl.configs.RegularizerConfig(name='calib_hessian', l2=1e-4),
+              ],
+          ),
+          tfl.configs.FeatureConfig(
+              name='thal',
+              # Partial monotonicity:
+              # output(normal) <= output(fixed)
+              # output(normal) <= output(reversible)
+              monotonicity=[('normal', 'fixed'), ('normal', 'reversible')],
+          ),
+      ],
+      # Global regularizers
+      regularizer_configs=[...])
+  feature_analysis_input_fn = create_input_fn(num_epochs=1, ...)
+  train_input_fn = create_input_fn(num_epochs=100, ...)
+  estimator = tfl.estimators.CannedClassifier(
+      feature_columns=feature_columns,
+      model_config=model_config,
+      feature_analysis_input_fn=feature_analysis_input_fn)
+  estimator.train(input_fn=train_input_fn)
+  ```
+  """
+  _HAS_DYNAMIC_ATTRIBUTES = True  # Required for pytype checks.
+
+  def __init__(self,
+               name,
+               is_missing_name=None,
+               default_value=None,
+               lattice_size=2,
+               monotonicity='none',
+               unimodality='none',
+               reflects_trust_in=None,
+               dominates=None,
+               pwl_calibration_always_monotonic=False,
+               pwl_calibration_convexity=0,
+               pwl_calibration_num_keypoints=10,
+               pwl_calibration_input_keypoints='quantiles',
+               pwl_calibration_clip_min=None,
+               pwl_calibration_clip_max=None,
+               pwl_calibration_clamp_min=False,
+               pwl_calibration_clamp_max=False,
+               num_buckets=0,
+               vocabulary_list=None,
+               regularizer_configs=None):
+    """Initializes a `FeatureConfig` instance.
+
+    Args:
+      name: The name of the feature, which should match the name of a given
+        FeatureColumn or a key in the input feature dict.
+      is_missing_name: The name of a FeatureColumn or key in the input feature
+        dict that indicates missing-ness of the main feature.
+      default_value: [Automatically filled in from `FeatureColumns`] If set,
+        this value in the input represents a missing value. For numeric
+        features, the output will be imputed. If default_value is provided for
+        a categorical feature, it corresponds to the last bucket counted in
+        num_buckets.
+      lattice_size: The number of lattice vertices to be used along the axis
+        for this feature.
+      monotonicity:
+        - For numeric features, specifies if the model output should
+          be monotonic in this feature, using 'increasing' or 1 to indicate
+          increasing monotonicity, 'decreasing' or -1 to indicate decreasing
+          monotonicity, and 'none' or 0 to indicate no monotonicity
+          constraints.
+        - For categorical features, a list of (category_a, category_b) pairs
+          from the vocabulary list indicating that with other features fixed,
+          model output for category_b should be greater than or equal to
+          category_a.
+      unimodality: For numeric features, specifies if the model output should
+        be unimodal in the corresponding feature, using 'valley' or 1 to
+        indicate that the function first decreases, then increases, and 'none'
+        or 0 to indicate no unimodality constraints. Not used for categorical
+        features.
+      reflects_trust_in: None or a list of `tfl.configs.TrustConfig` instances.
+      dominates: None or a list of `tfl.configs.DominanceConfig` instances.
+      pwl_calibration_always_monotonic: Specifies if the piecewise-linear
+        calibration should always be monotonic regardless of the specified
+        end-to-end model output `monotonicity` with respect to this feature.
+      pwl_calibration_convexity: Specifies the convexity constraints of the
+        calibrators for numeric features. Convexity is indicated by 'convex'
+        or 1, concavity is indicated by 'concave' or -1, and 'none' or 0
+        indicates no convexity/concavity constraints. Does not affect
+        categorical features. Concavity together with increasing monotonicity,
+        as well as convexity together with decreasing monotonicity, results in
+        diminishing return constraints.
+      pwl_calibration_num_keypoints: Number of keypoints to use for
+        piecewise-linear calibration.
+      pwl_calibration_input_keypoints: Indicates what should be used for the
+        input keypoints of the piecewise-linear calibration. It can be one of:
+        - String `'quantiles'`: Input keypoints are set to feature quantiles.
+        - String `'uniform'`: Input keypoints are uniformly spaced in feature
+          range.
+        - A list of numbers: Explicitly specifies the keypoints.
+      pwl_calibration_clip_min: Input values are lower clipped by this value.
+      pwl_calibration_clip_max: Input values are upper clipped by this value.
+      pwl_calibration_clamp_min: For monotonic calibrators, ensures that the
+        minimum value in calibration output is reached.
+      pwl_calibration_clamp_max: For monotonic calibrators, ensures that the
+        maximum value in calibration output is reached.
+      num_buckets: [Automatically filled in from `FeatureColumns`] Number of
+        categories for a categorical feature. Out-of-vocabulary and
+        missing/default values should be counted into num_buckets (last
+        buckets).
+      vocabulary_list: [Automatically filled in from `FeatureColumns`] The
+        input vocabulary of the feature.
+      regularizer_configs: None or a list of per-feature
+        `tfl.configs.RegularizerConfig` instances.
+    """
+    super(FeatureConfig, self).__init__(locals())
+
+
+class RegularizerConfig(_Config):
+  """Regularizer configuration for TFL canned estimators.
+
+  Regularizers can either be applied to specific features, or can be applied
+  globally to all features or lattices.
+
+
+  * **Calibrator regularizers:**
+
+    These regularizers are applied to PWL calibration layers.
+
+    - `'calib_laplacian'`: Creates an instance of
+      `tfl.pwl_calibration_layer.LaplacianRegularizer`. A calibrator laplacian
+      regularizer penalizes the changes in the output and results in a
+      *flatter calibration function*.
+    - `'calib_hessian'`: Creates an instance of
+      `tfl.pwl_calibration_layer.HessianRegularizer`. A calibrator hessian
+      regularizer penalizes changes in the slope, resulting in a *more linear
+      calibration*.
+    - `'calib_wrinkle'`: Creates an instance of
+      `tfl.pwl_calibration_layer.WrinkleRegularizer`. A calibrator wrinkle
+      regularizer penalizes the third derivative, resulting in a smoother
+      function with *fewer changes in the curvature*.
+
+
+  * **Lattice regularizers:**
+
+    These regularizers are applied to lattice layers.
+
+    - `'laplacian'`: Creates an instance of
+      `tfl.lattice_layer.LaplacianRegularizer`. Laplacian regularizers
+      penalize the difference between adjacent vertices in a multi-cell
+      lattice, resulting in a *flatter lattice function*.
+    - `'torsion'`: Creates an instance of
+      `tfl.lattice_layer.TorsionRegularizer`. Torsion regularizers penalize
+      how much the lattice function twists from side to side, i.e. non-linear
+      interactions in each 2 x 2 cell. Using this regularization results in a
+      *more linear lattice function*.
+
+
+  Examples:
+
+  ```python
+  model_config = tfl.configs.CalibratedLatticeConfig(
+      feature_configs=[
+          tfl.configs.FeatureConfig(
+              name='age',
+              lattice_size=3,
+              # Per feature regularization.
+              regularizer_configs=[
+                  tfl.configs.RegularizerConfig(name='calib_hessian', l2=1e-4),
+              ],
+          ),
+          tfl.configs.FeatureConfig(
+              name='thal',
+              # Partial monotonicity:
+              # output(normal) <= output(fixed)
+              # output(normal) <= output(reversible)
+              monotonicity=[('normal', 'fixed'), ('normal', 'reversible')],
+          ),
+      ],
+      # Global regularizers
+      regularizer_configs=[
+          # Torsion regularizer applied to the lattice to make it more linear.
+          configs.RegularizerConfig(name='torsion', l2=1e-4),
+          # Globally defined calibration regularizer is applied to all
+          # features.
+          configs.RegularizerConfig(name='calib_hessian', l2=1e-4),
+      ])
+  feature_analysis_input_fn = create_input_fn(num_epochs=1, ...)
+  train_input_fn = create_input_fn(num_epochs=100, ...)
+  estimator = tfl.estimators.CannedClassifier(
+      feature_columns=feature_columns,
+      model_config=model_config,
+      feature_analysis_input_fn=feature_analysis_input_fn)
+  estimator.train(input_fn=train_input_fn)
+  ```
+  """
+  _HAS_DYNAMIC_ATTRIBUTES = True  # Required for pytype checks.
+
+  def __init__(self, name, l1=0.0, l2=0.0):
+    """Initializes a `RegularizerConfig` instance.
+
+    Args:
+      name: The name of the regularizer.
+      l1: l1 regularization amount.
+      l2: l2 regularization amount.
+    """
+    super(RegularizerConfig, self).__init__(locals())
+
+
+class TrustConfig(_Config):
+  """Configuration for feature trusts in TFL canned estimators.
+
+  You can specify how a feature reflects trust in another feature. Supported
+  trust types (see `tfl.lattice_layer.Lattice` for details):
+
+  - `'edgeworth'`: Edgeworth trust constrains the function to be more
+    responsive to a main feature as a secondary conditional feature increases
+    or decreases. For example, we may want the model to rely more on average
+    rating (main feature) when the number of reviews (conditional feature) is
+    high. In particular, the constraint guarantees that a given change in the
+    main feature's value will change the model output by more when a secondary
+    feature indicates higher trust in the main feature. Note that the
+    constraint only works when the model is monotonic in the main feature.
+  - `'trapezoid'`: Trapezoid trust is conceptually similar to edgeworth trust,
+    but this constraint guarantees that the range of possible outputs along
+    the main feature dimension, when a conditional feature indicates low
+    trust, is a *subset* of the range of outputs when a conditional feature
+    indicates high trust. When lattices have 2 vertices in each constrained
+    dimension, this implies edgeworth trust (which only constrains the size of
+    the relevant ranges). With more than 2 lattice vertices per dimension, the
+    two constraints diverge and are not necessarily 'weaker' or 'stronger'
+    than each other - edgeworth trust acts throughout the lattice interior on
+    delta shifts in the main feature, while trapezoid trust only acts on the
+    min and max extremes of the main feature, constraining the overall range
+    of outputs across the domain of the main feature. The two types of trust
+    constraints can be applied jointly.
+
+  Trust constraints only affect lattices. When using trapezoid constraints in
+  ensemble models, note that if a conditional feature is used in a lattice
+  without the main feature also being used in the same lattice, then the
+  trapezoid constraint might be violated for the ensemble function.
+
+  Examples:
+
+  One feature reflecting trust in another:
+
+  ```python
+  model_config = tfl.configs.CalibratedLatticeConfig(
+      feature_configs=[
+          tfl.configs.FeatureConfig(
+              name='num_reviews',
+              reflects_trust_in=[
+                  configs.TrustConfig(
+                      feature_name='average_rating', trust_type='edgeworth'),
+              ],
+          ),
+          tfl.configs.FeatureConfig(
+              name='average_rating',
+          ),
+      ])
+  ```
+
+  Features can reflect positive or negative trust in other features. For
+  example, if the task is to estimate a property price in a neighborhood given
+  two average prices for commercial and residential properties, you can use a
+  trust feature `percentage_commercial_properties` to indicate that the model
+  should be more responsive to the commercial estimate if more properties in
+  the neighborhood are commercial. You can simultaneously have a negative
+  trust constraint for residential properties, since higher commercial land
+  usage indicates fewer houses, hence less market influence and a less
+  accurate estimate for residential property prices.
+
+  ```python
+  model_config = tfl.configs.CalibratedLatticeConfig(
+      feature_configs=[
+          tfl.configs.FeatureConfig(
+              name='percentage_commercial_properties',
+              reflects_trust_in=[
+                  configs.TrustConfig(
+                      feature_name='average_commercial_property_price',
+                      direction='positive'),
+                  configs.TrustConfig(
+                      feature_name='average_residential_property_price',
+                      direction='negative'),
+              ],
+          ),
+          tfl.configs.FeatureConfig(
+              name='average_commercial_property_price',
+          ),
+          tfl.configs.FeatureConfig(
+              name='average_residential_property_price',
+          ),
+          tfl.configs.FeatureConfig(
+              name='square_footage',
+          ),
+          ...
+      ])
+  ```
+  """
+  _HAS_DYNAMIC_ATTRIBUTES = True  # Required for pytype checks.
+
+  def __init__(self,
+               feature_name,
+               trust_type='edgeworth',
+               direction='positive'):
+    """Initializes a `TrustConfig` instance.
+
+    Args:
+      feature_name: Name of the "main" feature for the trust constraint.
+      trust_type: Type of trust constraint. Either `'edgeworth'` or
+        `'trapezoid'`.
+      direction: Direction of the trust. Should be: `'positive'`, `'negative'`,
+        1 or -1.
+    """
+    super(TrustConfig, self).__init__(locals())
+
+
+class DominanceConfig(_Config):
+  """Configuration for dominance constraints in TFL canned estimators.
+
+  You can specify how a feature dominates another feature. Supported dominance
+  types (see `tfl.lattice_layer.Lattice` and `tfl.linear_layer.Linear` for
+  details):
+
+  - `'monotonic'`: Monotonic dominance constrains the function to require the
+    effect (slope) in the direction of the *dominant* dimension to be greater
+    than that of the *weak* dimension for any point in both lattice and linear
+    models. Both dominant and weak dimensions must be monotonic. The
+    constraint is guaranteed to be satisfied at the end of training for linear
+    models, but might not be strictly satisfied for lattice models. In such
+    cases, increase the number of projection iterations.
+
+  Example:
+
+  ```python
+  model_config = tfl.configs.CalibratedLatticeConfig(
+      feature_configs=[
+          tfl.configs.FeatureConfig(
+              name='num_purchases',
+              dominates=[
+                  configs.DominanceConfig(
+                      feature_name='num_clicks', dominance_type='monotonic'),
+              ],
+          ),
+          tfl.configs.FeatureConfig(
+              name='num_clicks',
+          ),
+      ])
+  ```
+  """
+  _HAS_DYNAMIC_ATTRIBUTES = True  # Required for pytype checks.
+
+  def __init__(self, feature_name, dominance_type='monotonic'):
+    """Initializes a `DominanceConfig` instance.
+
+    Args:
+      feature_name: Name of the `"dominant"` feature for the dominance
+        constraint.
+      dominance_type: Type of dominance constraint. Currently, supports
+        `'monotonic'`.
+    """
+    super(DominanceConfig, self).__init__(locals())
+
+
+class _TypeDict(collections.defaultdict):
+  """Type dict that defaults to string type for hparams."""
+
+  def __init__(self, hparams):
+    super(_TypeDict,
+          self).__init__(lambda: str,
+                         {k: type(v) for k, v in hparams.values().items()})
+
+  def __contains__(self, _):
+    return True
+
+
+def apply_updates(model_config, updates):
+  """Updates a model config with the given set of (key, values) updates.
+
+  Any value passed in the updates that matches a field of the config will be
+  applied to the config. Nested configs can be updated as follows: to
+  add/update a field `FIELD` in the feature config for feature `FEATURE`, use
+  `feature__FEATURE__FIELD` as the key. To add/update a field `FIELD` for the
+  regularizer with name `REGULARIZER`, use `regularizer__REGULARIZER__FIELD`
+  as the key. This naming scheme can be nested. When possible, string values
+  will be converted to the corresponding value type in the model config.
+
+  Example:
+
+  ```python
+  model_config = ...
+  updates = [
+      ('output_max', 1),
+      ('regularizer__torsion__l1', 0.001),
+      ('feature__some_feature_name__lattice_size', 4),
+      ('feature__some_feature_name__regularizer__calib_hessian__l2', 0.001),
+      ('unrelated_hparam_not_affecting_model_config', 42),
+  ]
+  configs.apply_updates(model_config, updates)
+  ```
+
+  Arguments:
+    model_config: The model config object to apply the updates to.
+    updates: A list of (key, value) pairs with potential config updates. Values
+      that are not matched to a field in the model config will be ignored.
+
+  Returns:
+    Number of updates that are applied to the model config.
+  """
+  applied_updates = 0
+  for k, v in updates:
+    if _apply_update(model_config, k, v):
+      applied_updates += 1
+      logging.info('Updated model config with %s=%s', k, str(v))
+  return applied_updates
+
+
+def _apply_update(node, k, v):
+  """Applies k, v updates to the given config node. See apply_updates."""
+  while '__' in k:
+    parts = k.split('__', 2)
+    if len(parts) != 3:
+      return False
+    prefix, child_node_name, k = parts
+    if (prefix == _HPARAM_FEATURE_PREFIX and
+        isinstance(node, _HasFeatureConfigs)):
+      node = node.feature_config_by_name(child_node_name)
+    elif (prefix == _HPARAM_REGULARIZER_PREFIX and
+          isinstance(node, _HasRegularizerConfigs)):
+      node = node.regularizer_config_by_name(child_node_name)
+    else:
+      return False
+
+  if hasattr(node, k):
+    if isinstance(v, str):
+      current_value = getattr(node, k)
+      if current_value is None:
+        raise ValueError(
+            'Field `{}` has None value and can not be overridden by the '
+            'hparams string value `{}` since the type cannot be inferred. An '
+            'initial value must be set for the field to use string hparams.'
+            .format(k, v))
+      v = type(current_value)(v)
+
+    setattr(node, k, v)
+    return True
+
+  return False
diff --git a/tensorflow_lattice/python/configs_test.py b/tensorflow_lattice/python/configs_test.py
new file mode 100644
index 0000000..a0a0a63
--- /dev/null
+++ b/tensorflow_lattice/python/configs_test.py
@@ -0,0 +1,126 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for TFL model configuration library."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+import tensorflow as tf
+from tensorflow_lattice.python import configs
+
+
+class ConfigsTest(tf.test.TestCase):
+
+  def test_updates(self):
+    model_config = configs.CalibratedLatticeConfig(
+        output_min=0,
+        regularizer_configs=[
+            configs.RegularizerConfig(name='torsion', l2=2e-3),
+        ],
+        feature_configs=[
+            configs.FeatureConfig(
+                name='feature_a',
+                pwl_calibration_input_keypoints='quantiles',
+                pwl_calibration_num_keypoints=8,
+                monotonicity=1,
+                pwl_calibration_clip_max=100,
+            ),
+            configs.FeatureConfig(
+                name='feature_b',
+                lattice_size=3,
+                unimodality='valley',
+                pwl_calibration_input_keypoints='uniform',
+                pwl_calibration_num_keypoints=5,
+                pwl_calibration_clip_min=130,
+                pwl_calibration_convexity='convex',
+                regularizer_configs=[
+                    configs.RegularizerConfig(name='calib_hessian', l2=3e-3),
+                ],
+            ),
+            configs.FeatureConfig(
+                name='feature_c',
+                pwl_calibration_input_keypoints=[0.0, 0.5, 1.0],
+                reflects_trust_in=[
+                    configs.TrustConfig(feature_name='feature_a'),
+                    configs.TrustConfig(feature_name='feature_b', direction=-1),
+                ],
+            ),
+            configs.FeatureConfig(
+                name='feature_d',
+                num_buckets=3,
+                vocabulary_list=['a', 'b', 'c'],
+                default_value=-1,
+            ),
+        ])
+
+    updates = [
+        # Update values can be passed in as numbers.
+        ('output_max', 1.0),  # update
+        ('regularizer__torsion__l2', 0.004),  # update
+        ('regularizer__calib_hessian__l1', 0.005),  # insert
+        ('feature__feature_a__lattice_size', 3),  # update
+        ('feature__feature_e__lattice_size', 4),  # insert
+        # Update values can be strings.
+        ('unrelated_hparams_not_affecting_config', 'unrelated'),
+        ('feature__feature_a__regularizer__calib_wrinkle__l1', '0.6'),  # insert
+        ('feature__feature_b__regularizer__calib_hessian__l1', '0.7'),  # update
+        ('yet__another__unrelated_config', '4'),
+    ]
+    self.assertEqual(configs.apply_updates(model_config, updates), 7)
+
+    model_config.feature_config_by_name('feature_a').monotonicity = 'none'
+    model_config.feature_config_by_name('feature_f').num_buckets = 4  # insert
+
+    feature_names = [
+        feature_config.name for feature_config in model_config.feature_configs
+    ]
+    expected_feature_names = [
+        'feature_a', 'feature_b', 'feature_c', 'feature_d', 'feature_e',
+        'feature_f'
+    ]
+    self.assertCountEqual(feature_names, expected_feature_names)
+
+    global_regularizer_names = [
+        regularizer_config.name
+        for regularizer_config in model_config.regularizer_configs
+    ]
+    expected_global_regularizer_names = ['torsion', 'calib_hessian']
+    self.assertCountEqual(global_regularizer_names,
+                          expected_global_regularizer_names)
+
+    self.assertEqual(model_config.output_max, 1.0)
+    self.assertEqual(
+        model_config.feature_config_by_name('feature_a').lattice_size, 3)
+    self.assertEqual(
+        model_config.feature_config_by_name(
+            'feature_b').pwl_calibration_convexity, 'convex')
+    self.assertEqual(
+        model_config.feature_config_by_name('feature_e').lattice_size, 4)
+    self.assertEqual(
+        model_config.regularizer_config_by_name('torsion').l2, 0.004)
+    self.assertEqual(
+        model_config.regularizer_config_by_name('calib_hessian').l1, 0.005)
+    self.assertEqual(
+        model_config.feature_config_by_name(
+            'feature_a').regularizer_config_by_name('calib_wrinkle').l1, 0.6)
+    self.assertEqual(
+        model_config.feature_config_by_name(
+            'feature_b').regularizer_config_by_name('calib_hessian').l1, 0.7)
+
+
+if __name__ == '__main__':
+  tf.test.main()
diff --git a/tensorflow_lattice/python/estimators.py b/tensorflow_lattice/python/estimators.py
new file mode 100644
index 0000000..5ebeab5
--- /dev/null
+++ b/tensorflow_lattice/python/estimators.py
@@ -0,0 +1,2264 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""TF Lattice canned estimators implement typical monotonic model architectures.
+
+You can use TFL canned estimators to easily construct commonly used monotonic
+model architectures. To construct a TFL canned estimator, construct a model
+configuration from `tfl.configs` and pass it to the canned estimator
+constructor. To use automated quantile calculation, canned estimators also
+require passing a *feature_analysis_input_fn* which is similar to the one used
+for training, but with a single epoch or a subset of the data. To create a
+Crystals ensemble model using `tfl.configs.CalibratedLatticeEnsembleConfig`,
+you will also need to provide a *prefitting_input_fn* to the estimator
+constructor.
+
+```python
+feature_columns = ...
+model_config = tfl.configs.CalibratedLatticeConfig(...)
+feature_analysis_input_fn = create_input_fn(num_epochs=1, ...)
+train_input_fn = create_input_fn(num_epochs=100, ...)
+estimator = tfl.estimators.CannedClassifier(
+    feature_columns=feature_columns,
+    model_config=model_config,
+    feature_analysis_input_fn=feature_analysis_input_fn)
+estimator.train(input_fn=train_input_fn)
+```
+
+Supported models are defined in `tfl.configs`. Each model architecture can be
+used for:
+
+* **Classification** using `tfl.estimators.CannedClassifier` with standard
+  classification head (softmax cross-entropy loss).
+
+* **Regression** using `tfl.estimators.CannedRegressor` with standard
+  regression head (squared loss).
+
+* **Custom head** using `tfl.estimators.CannedEstimator` with any custom head
+  and loss.
+
+This module also provides `tfl.estimators.get_model_graph` as a mechanism to
+extract abstract model graphs and layer parameters from saved models. The
+resulting graph (not a TF graph) can be used by the `tfl.visualization` module
+for plotting and other visualization and analysis.
+
+```python
+model_graph = estimators.get_model_graph(saved_model_path)
+visualization.plot_feature_calibrator(model_graph, "feature_name")
+visualization.plot_all_calibrators(model_graph)
+visualization.draw_model_graph(model_graph)
+```
+
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+import copy
+import itertools
+import json
+import os
+import re
+import time
+
+from . import categorical_calibration_layer
+from . import configs
+from . import lattice_layer
+from . import lattice_lib
+from . import linear_layer
+from . import model_info
+from . import pwl_calibration_layer
+from . import pwl_calibration_lib
+
+from absl import logging
+import enum
+import numpy as np
+import six
+import tensorflow as tf
+
+from tensorflow.python.feature_column import feature_column as fc  # pylint: disable=g-direct-tensorflow-import
+from tensorflow.python.feature_column import feature_column_v2 as fc2  # pylint: disable=g-direct-tensorflow-import
+from tensorflow.python.keras.utils import losses_utils  # pylint: disable=g-direct-tensorflow-import
+from tensorflow.python.training import training_util  # pylint: disable=g-direct-tensorflow-import
+from tensorflow_estimator.python.estimator import estimator as estimator_lib
+from tensorflow_estimator.python.estimator.canned import optimizers
+from tensorflow_estimator.python.estimator.head import head_utils
+from tensorflow_estimator.python.estimator.head import regression_head
+
+# TODO: support multi dim inputs.
+# TODO: support multi dim output.
+# TODO: add linear layer regularizers.
+# TODO: add examples in docs.
+# TODO: make _REPEATED_PAIR_DISCOUNT_IN_CRYSTALS_SCORE config param
+
+# Layer names used for layers in the canned models.
+INPUT_LAYER_NAME = 'tfl_input'
+CALIB_LAYER_NAME = 'tfl_calib'
+LATTICE_LAYER_NAME = 'tfl_lattice'
+LINEAR_LAYER_NAME = 'tfl_linear'
+OUTPUT_CALIB_LAYER_NAME = 'tfl_output_calib'
+
+# Prefix for passthrough (identity) nodes for shared calibration.
+# These nodes pass shared calibrated values to submodels in an ensemble.
+CALIB_PASSTHROUGH_NAME = 'tfl_calib_passthrough'
+
+# Feed and fetch names for the model.
+FEATURES_SCOPE = 'features'
+OUTPUT_NAME = 'output'
+
+# File to store and load feature keypoints.
+_KEYPOINTS_FILE = 'keypoints.json'
+
+# File to store and load lattice ensemble structure.
+_ENSEMBLE_STRUCTURE_FILE = 'ensemble_structure.json'
+
+# Name for label keypoints in keypoints file.
+_LABEL_FEATURE_NAME = '__label__'
+
+# Prefix for defining feature calibrator regularizers.
+_INPUT_CALIB_REGULARIZER_PREFIX = 'calib_'
+
+# Prefix for defining output calibrator regularizers.
+_OUTPUT_CALIB_REGULARIZER_PREFIX = 'output_calib_'
+
+# Polling interval and maximum wait time for workers waiting for files.
+_MAX_WAIT_TIME = 1200
+_POLL_INTERVAL_SECS = 10
+
+# Weight of laplacian in feature importance for the crystal algorithm.
+_LAPLACIAN_WEIGHT_IN_IMPORTANCE = 6.0
+
+# Discount amount for repeated co-occurrence of pairs of features in crystals.
+_REPEATED_PAIR_DISCOUNT_IN_CRYSTALS_SCORE = 0.5
+
+# Maximum number of swaps for the crystals algorithm.
+_MAX_CRYSTALS_SWAPS = 1000
+
+
+class WaitTimeOutError(Exception):
+  """Timeout error when waiting for a file."""
+  pass
+
+
+def _poll_for_file(filename):
+  """Waits and polls for a file until it exists."""
+  start = time.time()
+  while not tf.io.gfile.exists(filename):
+    time.sleep(_POLL_INTERVAL_SECS)
+    if time.time() - start > _MAX_WAIT_TIME:
+      raise WaitTimeOutError('Waiting for file {} timed out'.format(filename))
+
+
+def transform_features(features, feature_columns=None):
+  """Parses the input features using the given feature columns.
+
+  This function can be used to parse input features when constructing a custom
+  estimator. When using this function, you will not need to wrap categorical
+  features with dense feature embeddings, and the resulting tensors will not be
+  concatenated, making it easier to use the features in the calibration layers.
+
+  Args:
+    features: A dict from feature names to tensors.
+    feature_columns: A list of FeatureColumn objects to be used for parsing. If
+      not provided, the input features are assumed to be already parsed.
+
+  Returns:
+    collections.OrderedDict mapping feature names to parsed tensors.
+  """
+  with tf.name_scope('transform'):
+    if feature_columns:
+      parsed_features = collections.OrderedDict()
+      for feature_column in feature_columns:
+        # pylint: disable=protected-access
+        if (isinstance(feature_column, fc._DenseColumn) or
+            isinstance(feature_column, fc2.DenseColumn)):
+          parsed_features[
+              feature_column.name] = feature_column._transform_feature(
+                  features)
+        elif (isinstance(feature_column, fc._CategoricalColumn) or
+              isinstance(feature_column, fc2.CategoricalColumn)):
+          parsed_features[
+              feature_column.name] = feature_column._transform_feature(
+                  features).values
+        else:
+          raise ValueError(
+              'Unsupported feature_column: {}'.format(feature_column))
+        # pylint: enable=protected-access
+    else:
+      parsed_features = collections.OrderedDict(features)
+
+    for name, tensor in parsed_features.items():
+      if len(tensor.shape) == 1:
+        parsed_features[name] = tf.expand_dims(tensor, 1)
+      elif len(tensor.shape) > 2 or tensor.shape[1] != 1:
+        raise ValueError('Only 1-d inputs are supported: {}'.format(tensor))
+
+    with tf.name_scope(FEATURES_SCOPE):
+      for name, tensor in parsed_features.items():
+        parsed_features[name] = tf.identity(parsed_features[name], name=name)
+
+    return parsed_features
+
+
+def _materialize_locally(tensors, max_elements=1e6):
+  """Materialize the given tensors locally, during initialization.
+
+  Assumes non-distributed environment (uses SingularMonitoredSession).
+
+  Args:
+    tensors: A dict of name to feed tensors to be materialized.
+    max_elements: Data is read and accumulated from tensors until end-of-input
+      is reached or at least max_elements have been collected.
+
+  Returns:
+    Materialized tensors as dict.
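+
+  Example (a minimal sketch with hypothetical input data):
+
+  ```python
+  dataset = tf.data.Dataset.from_tensor_slices(
+      {'age': [20.0, 30.0, 40.0]}).batch(2)
+  tensors = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
+  # Runs the feed tensors until end-of-input and concatenates the batches,
+  # returning {'age': np.array([20., 30., 40.])}.
+  materialized = _materialize_locally(tensors)
+  ```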
+ """ + # tf.compat.v1.train.SingularMonitoredSession silently catches + # tf.errors.OutOfRangeError, and we want to expose it to detect end of the + # data from the given feed tensors. + with tf.compat.v1.train.SingularMonitoredSession() as sess: + splits = [] + count = 0 + try: + while count < max_elements: + materialized_tensors = sess.run(tensors) + values = list(materialized_tensors.values()) + if not values: + break + count += len(values[0]) + splits.append(materialized_tensors) + except (tf.errors.OutOfRangeError, StopIteration): + pass + concatenated_tensors = {} + for k in tensors: + concatenated_tensors[k] = np.concatenate( + [split[k] for split in splits if split[k].size > 0]) + return concatenated_tensors + + +def _finalize_keypoints(model_config, config, feature_columns, + feature_analysis_input_fn, logits_output): + """Calculates and sets keypoints for input and output calibration. + + Input and label keypoints are calculated, stored in a file and also set in the + model_config to be used for model construction. + + Args: + model_config: Model config to be updated. + config: A `tf.RunConfig` to indicate if worker is chief. + feature_columns: A list of FeatureColumn's to use for feature parsing. + feature_analysis_input_fn: An input_fn used to collect feature statistics. + logits_output: A boolean indicating if model outputs logits. + + Raises: + ValueError: If keypoints mode is invalid. + """ + if not feature_analysis_input_fn: + return + + keypoints_filename = os.path.join(config.model_dir, _KEYPOINTS_FILE) + if ((config is None or config.is_chief) and + not tf.io.gfile.exists(keypoints_filename)): + with tf.Graph().as_default(): + features, label = feature_analysis_input_fn() + features = transform_features(features, feature_columns) + features[_LABEL_FEATURE_NAME] = label + features = _materialize_locally(features) + + feature_keypoints = {} + for feature_name, feature_values in six.iteritems(features): + feature_values = feature_values.flatten() + + if feature_name == _LABEL_FEATURE_NAME: + # Default feature_values to [0, ... n_class-1] if string label. + if label.dtype == tf.string: + feature_values = np.arange(len(set(feature_values))) + num_keypoints = model_config.output_calibration_num_keypoint + keypoints = model_config.output_initialization + clip_min = model_config.output_min + clip_max = model_config.output_max + default_value = None + else: + feature_config = model_config.feature_config_by_name(feature_name) + if feature_config.num_buckets: + # Skip categorical features. + continue + num_keypoints = feature_config.pwl_calibration_num_keypoints + keypoints = feature_config.pwl_calibration_input_keypoints + clip_min = feature_config.pwl_calibration_clip_min + clip_max = feature_config.pwl_calibration_clip_max + default_value = feature_config.default_value + + # Remove default values before calculating stats. + feature_values = feature_values[feature_values != default_value] + + if np.isnan(feature_values).any(): + raise ValueError( + 'NaN values were observed for numeric feature `{}`. ' + 'Consider replacing the values in transform or input_fn.'.format( + feature_name)) + + # Before calculating keypoints, clip values as requested. + # Add min and max to the value list to make sure min/max in values match + # the requested range. 
+        if clip_min is not None:
+          feature_values = np.maximum(feature_values, clip_min)
+          feature_values = np.append(feature_values, clip_min)
+        if clip_max is not None:
+          feature_values = np.minimum(feature_values, clip_max)
+          feature_values = np.append(feature_values, clip_max)
+
+        # Remove duplicate values before calculating stats.
+        feature_values = np.unique(feature_values)
+
+        if isinstance(keypoints, str):
+          if keypoints == 'quantiles':
+            if (feature_name != _LABEL_FEATURE_NAME and
+                feature_values.size < num_keypoints):
+              logging.info(
+                  'Not enough unique values observed for feature `%s` to '
+                  'construct %d keypoints for pwl calibration. Using %d unique '
+                  'values as keypoints.', feature_name, num_keypoints,
+                  feature_values.size)
+              num_keypoints = feature_values.size
+            quantiles = np.quantile(
+                feature_values,
+                np.linspace(0., 1., num_keypoints),
+                interpolation='nearest')
+            feature_keypoints[feature_name] = [float(x) for x in quantiles]
+          elif keypoints == 'uniform':
+            linspace = np.linspace(
+                np.min(feature_values), np.max(feature_values), num_keypoints)
+            feature_keypoints[feature_name] = [float(x) for x in linspace]
+          else:
+            raise ValueError(
+                'Invalid keypoint generation mode: {}'.format(keypoints))
+        else:
+          # Keypoints are explicitly provided in the config.
+          feature_keypoints[feature_name] = [float(x) for x in keypoints]
+
+    # Save keypoints to file as the chief worker.
+    tmp_keypoints_filename = keypoints_filename + 'tmp'
+    with tf.io.gfile.GFile(tmp_keypoints_filename, 'w') as keypoints_file:
+      keypoints_file.write(json.dumps(feature_keypoints, indent=2))
+    tf.io.gfile.rename(tmp_keypoints_filename, keypoints_filename)
+  else:
+    # Non-chief workers read the keypoints from file.
+    _poll_for_file(keypoints_filename)
+    with tf.io.gfile.GFile(keypoints_filename) as keypoints_file:
+      feature_keypoints = json.loads(keypoints_file.read())
+
+  if _LABEL_FEATURE_NAME in feature_keypoints:
+    output_init = feature_keypoints.pop(_LABEL_FEATURE_NAME)
+    if logits_output and isinstance(model_config.output_initialization, str):
+      # If the model is expected to produce logits, initialize linearly in the
+      # range [-2, 2], ignoring the label distribution.
+      model_config.output_initialization = [
+          float(x) for x in np.linspace(
+              -2, 2, model_config.output_calibration_num_keypoint)
+      ]
+    else:
+      model_config.output_initialization = output_init
+
+  for feature_name, keypoints in feature_keypoints.items():
+    model_config.feature_config_by_name(
+        feature_name).pwl_calibration_input_keypoints = keypoints
+
+
+def _fix_ensemble_for_2d_constraints(model_config, feature_names):
+  """Fixes 2d constraint violations by adding missing features to some lattices.
+
+  Some 2d shape constraints require that lattices in the ensemble contain
+  either both constrained features or neither of them, e.g. a trapezoid trust
+  constraint requires that any lattice containing the "conditional" feature
+  also include the "main" feature.
+
+  Args:
+    model_config: Model config to be updated.
+    feature_names: List of feature names.
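+
+  Example (hypothetical): if the config for feature "a" includes
+  `reflects_trust_in=[configs.TrustConfig(feature_name="b",
+  trust_type="trapezoid")]`, then a lattice ["a", "c"] is extended to
+  ["a", "c", "b"].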
+ """ + must_include_features = collections.defaultdict(set) + for feature_name in feature_names: + feature_config = model_config.feature_config_by_name(feature_name) + for trust_config in feature_config.reflects_trust_in or []: + if trust_config.trust_type == 'trapezoid': + must_include_features[feature_name].add(trust_config.feature_name) + for dominance_config in feature_config.dominates or []: + must_include_features[dominance_config.feature_name].add(feature_name) + + fixed_lattices = [] + for idx, lattice in enumerate(model_config.lattices): + fixed_lattice = set() + for feature_name in lattice: + fixed_lattice.add(feature_name) + fixed_lattice.update(must_include_features[feature_name]) + assert len(lattice) <= len(fixed_lattice) + fixed_lattices.append(list(fixed_lattice)) + if len(lattice) < len(fixed_lattice): + logging.info( + 'Fixed 2d constraint violations in lattices[%d]. Lattice rank ' + 'increased from %d to %d.', idx, len(lattice), len(fixed_lattice)) + + model_config.lattices = fixed_lattices + + +def _set_random_lattice_ensemble(model_config, feature_names): + """Sets random lattice ensemble in the given model_config.""" + # Start by using each feature once. + np.random.seed(model_config.random_seed) + model_config.lattices = [[] for _ in range(model_config.num_lattices)] + for feature_name in feature_names: + non_full_indices = [ + i for (i, lattice) in enumerate(model_config.lattices) + if len(lattice) < model_config.lattice_rank + ] + model_config.lattices[np.random.choice(non_full_indices)].append( + feature_name) + + # Fill up lattices avoiding repeated features. + for lattice in model_config.lattices: + feature_names_not_in_lattice = [ + feature_name for feature_name in feature_names + if feature_name not in lattice + ] + remaining_size = model_config.lattice_rank - len(lattice) + lattice.extend( + np.random.choice( + feature_names_not_in_lattice, size=remaining_size, replace=False)) + + +def _add_pair_to_ensemble(lattices, lattice_rank, i, j): + """Adds pair (i, j) to the ensemble heuristically.""" + # First check if (i, j) pair is already present in a lattice. + for lattice in lattices: + if i in lattice and j in lattice: + return + + # Try adding to a lattice that already has either i or j. + for lattice in lattices: + if len(lattice) < lattice_rank: + if i in lattice: + lattice.add(j) + return + if j in lattice: + lattice.add(i) + return + + # Add both i and j to a lattice that has enough space left. + for lattice in lattices: + if len(lattice) < lattice_rank - 1: + lattice.add(i) + lattice.add(j) + return + + # Create a new lattice with pair (i, j). + lattices.append(set([i, j])) + + +def _set_all_pairs_cover_lattices(prefitting_model_config, feature_names): + """Sets prefitting lattice ensemble such that it covers all feature pairs.""" + # Pairs of co-occurrence that need to exist in the all-pairs cover. 
+  to_cover = list(itertools.combinations(range(len(feature_names)), 2))
+  np.random.seed(prefitting_model_config.random_seed)
+  np.random.shuffle(to_cover)
+
+  lattices = []
+
+  for (i, j) in to_cover:
+    _add_pair_to_ensemble(lattices, prefitting_model_config.lattice_rank, i, j)
+
+  prefitting_model_config.lattices = [
+      [feature_names[i] for i in lattice] for lattice in lattices
+  ]
+
+
+def _get_torsions_and_laplacians(prefitting_model_config, prefitting_estimator,
+                                 feature_names):
+  """Returns average torsion and laplacian regularizers in prefitted model."""
+  num_features = len(feature_names)
+  laplacians = [[] for _ in range(num_features)]
+  torsions = [[[] for _ in range(num_features)] for _ in range(num_features)]
+  for (lattice_index, lattice) in enumerate(prefitting_model_config.lattices):
+    # Get normalized lattice weights.
+    lattice_kernel_variable_name = '{}_{}/{}'.format(
+        LATTICE_LAYER_NAME, lattice_index, lattice_layer.LATTICE_KERNEL_NAME)
+    weights = prefitting_estimator.get_variable_value(
+        lattice_kernel_variable_name)
+    weights -= np.min(weights)
+    weights /= np.max(weights)
+    weights = tf.constant(weights)
+
+    # Convert feature names in the lattice to their index in feature_names.
+    lattice = [feature_names.index(feature_name) for feature_name in lattice]
+    lattice_sizes = [2] * len(lattice)
+    # feature_* refers to the feature index in feature_names.
+    # within_lattice_index_* is the index of the input dimension of the lattice.
+    for within_lattice_index_0, feature_0 in enumerate(lattice):
+      l2 = [0] * len(lattice)
+      l2[within_lattice_index_0] = 1
+      laplacians[feature_0].append(
+          lattice_lib.laplacian_regularizer(
+              weights=weights, lattice_sizes=lattice_sizes, l2=l2))
+      for within_lattice_index_1, feature_1 in enumerate(lattice):
+        if within_lattice_index_1 > within_lattice_index_0:
+          l2 = [0] * len(lattice)
+          l2[within_lattice_index_0] = 1
+          l2[within_lattice_index_1] = 1
+          torsion = lattice_lib.torsion_regularizer(
+              weights=weights, lattice_sizes=lattice_sizes, l2=l2)
+          torsions[feature_0][feature_1].append(torsion)
+          torsions[feature_1][feature_0].append(torsion)
+
+  if not tf.executing_eagerly():
+    with tf.compat.v1.Session() as sess:
+      laplacians = sess.run(laplacians)
+      torsions = sess.run(torsions)
+
+  laplacians = [np.mean(v) for v in laplacians]
+  torsions = [[np.mean(v) if v else 0.0 for v in row] for row in torsions]
+  return torsions, laplacians
+
+
+def _set_final_crystal_lattices(model_config, feature_names,
+                                prefitting_model_config, prefitting_estimator):
+  """Sets the lattice ensemble in model_config based on a prefitted model."""
+  torsions, laplacians = _get_torsions_and_laplacians(
+      prefitting_model_config=prefitting_model_config,
+      prefitting_estimator=prefitting_estimator,
+      feature_names=feature_names)
+
+  # Calculate features' importance_score = lambda * laplacians + torsion.
+  # Used to allocate slots to useful features with more non-linear interactions.
+  num_features = len(feature_names)
+  importance_scores = np.array(laplacians) * _LAPLACIAN_WEIGHT_IN_IMPORTANCE
+  for feature_0, feature_1 in itertools.combinations(range(num_features), 2):
+    importance_scores[feature_0] += torsions[feature_0][feature_1]
+    importance_scores[feature_1] += torsions[feature_0][feature_1]
+
+  # Each feature is used at least once, and the remaining slots are distributed
+  # proportionally to the importance_scores.
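+  # E.g. 5 lattices of rank 2 provide 10 feature slots; with 6 features, the
+  # remaining 4 slots go to the features with the highest scores.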
+  features_uses = [1] * num_features
+  total_feature_use = model_config.num_lattices * model_config.lattice_rank
+  remaining_uses = total_feature_use - num_features
+  remaining_scores = np.sum(importance_scores)
+  for feature in np.argsort(-importance_scores):
+    added_uses = int(
+        round(remaining_uses * importance_scores[feature] / remaining_scores))
+    # Each feature cannot be used more than once in a finalized lattice.
+    added_uses = min(added_uses, model_config.num_lattices - 1)
+    features_uses[feature] += added_uses
+    remaining_uses -= added_uses
+    remaining_scores -= importance_scores[feature]
+  assert np.sum(features_uses) == total_feature_use
+
+  # Add features to the add list in round-robin order.
+  add_list = []
+  for use in range(1, max(features_uses) + 1):
+    for feature_index, feature_use in enumerate(features_uses):
+      if use <= feature_use:
+        add_list.append(feature_index)
+  assert len(add_list) == total_feature_use
+
+  # Set up initial lattices that will be optimized by swapping later.
+  lattices = [[] for _ in range(model_config.num_lattices)]
+  cooccurrence_counts = [[0] * num_features for _ in range(num_features)]
+  for feature_to_be_added in add_list:
+    # List of pairs of (addition_score, candidate_lattice_to_add_to).
+    score_candidates_pairs = []
+    for candidate_lattice_to_add_to in range(model_config.num_lattices):
+      # addition_score indicates the priority of an addition.
+      if len(
+          lattices[candidate_lattice_to_add_to]) >= model_config.lattice_rank:
+        # The lattice is already full.
+        addition_score = -2.0
+      elif feature_to_be_added in lattices[candidate_lattice_to_add_to]:
+        # Repeated feature (repeats are fixed later by swapping).
+        addition_score = -1.0
+      elif not lattices[candidate_lattice_to_add_to]:
+        # Starting a new lattice roughly has an "average" lattice score.
+        addition_score = np.mean(torsions) * model_config.lattice_rank**2 / 2
+      else:
+        # All other cases: change in total discounted torsion after addition.
+        addition_score = 0.0
+        for other_feature in lattices[candidate_lattice_to_add_to]:
+          addition_score += (
+              torsions[feature_to_be_added][other_feature] *
+              _REPEATED_PAIR_DISCOUNT_IN_CRYSTALS_SCORE
+              **(cooccurrence_counts[feature_to_be_added][other_feature]))
+
+      score_candidates_pairs.append(
+          (addition_score, candidate_lattice_to_add_to))
+
+    # Use the highest scoring addition.
+    score_candidates_pairs.sort(reverse=True)
+    best_candidate_lattice_to_add_to = score_candidates_pairs[0][1]
+    for other_feature in lattices[best_candidate_lattice_to_add_to]:
+      cooccurrence_counts[feature_to_be_added][other_feature] += 1
+      cooccurrence_counts[other_feature][feature_to_be_added] += 1
+    lattices[best_candidate_lattice_to_add_to].append(feature_to_be_added)
+
+  # Apply swapping operations to increase within-lattice torsion.
+  changed = True
+  iteration = 0
+  while changed:
+    if iteration > _MAX_CRYSTALS_SWAPS:
+      logging.info('Crystals algorithm did not fully converge.')
+      break
+    changed = False
+    iteration += 1
+    for lattice_0, lattice_1 in itertools.combinations(lattices, 2):
+      # For every pair of lattices: lattice_0, lattice_1.
+      for index_0, index_1 in itertools.product(
+          range(len(lattice_0)), range(len(lattice_1))):
+        # Consider swapping lattice_0[index_0] with lattice_1[index_1].
+        rest_lattice_0 = list(lattice_0)
+        rest_lattice_1 = list(lattice_1)
+        feature_0 = rest_lattice_0.pop(index_0)
+        feature_1 = rest_lattice_1.pop(index_1)
+        if feature_0 == feature_1:
+          continue
+
+        # Calculate the change in the overall discounted sum of torsion terms.
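+        # Pairs that would be both added and removed ('wash') cancel out and
+        # are excluded from the score computed below.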
+ added_cooccurrence = set( + [tuple(sorted((feature_1, other))) for other in rest_lattice_0] + + [tuple(sorted((feature_0, other))) for other in rest_lattice_1]) + removed_cooccurrence = set( + [tuple(sorted((feature_0, other))) for other in rest_lattice_0] + + [tuple(sorted((feature_1, other))) for other in rest_lattice_1]) + wash = added_cooccurrence.intersection(removed_cooccurrence) + added_cooccurrence = added_cooccurrence.difference(wash) + removed_cooccurrence = removed_cooccurrence.difference(wash) + swap_diff_torsion = ( + sum(torsions[i][j] * _REPEATED_PAIR_DISCOUNT_IN_CRYSTALS_SCORE** + cooccurrence_counts[i][j] for (i, j) in added_cooccurrence) - + sum(torsions[i][j] * _REPEATED_PAIR_DISCOUNT_IN_CRYSTALS_SCORE** + (cooccurrence_counts[i][j] - 1) + for (i, j) in removed_cooccurrence)) + + # Swap if a feature is repeated or if the score change is positive. + if (feature_0 not in lattice_1 and feature_1 not in lattice_0 and + (lattice_0.count(feature_0) > 1 or lattice_1.count(feature_1) > 1 or + swap_diff_torsion > 0)): + for (i, j) in added_cooccurrence: + cooccurrence_counts[i][j] += 1 + cooccurrence_counts[j][i] += 1 + for (i, j) in removed_cooccurrence: + cooccurrence_counts[i][j] -= 1 + cooccurrence_counts[j][i] -= 1 + lattice_0[index_0], lattice_1[index_1] = (lattice_1[index_1], + lattice_0[index_0]) + changed = True + + model_config.lattices = [[ + feature_names[features_index] for features_index in lattice + ] for lattice in lattices] + + +def _set_crystals_lattice_ensemble(model_config, feature_names, label_dimension, + feature_columns, head, prefitting_input_fn, + prefitting_optimizer, prefitting_steps, + config, dtype): + """Sets the lattice ensemble in model_config using the crystals algorithm.""" + if prefitting_input_fn is None: + raise ValueError('prefitting_input_fn must be set for crystals models') + + prefitting_model_config = copy.deepcopy(model_config) + _set_all_pairs_cover_lattices( + prefitting_model_config=prefitting_model_config, + feature_names=feature_names) + + # Trim the model for faster prefitting. + for feature_config in prefitting_model_config.feature_configs: + feature_config.lattice_size = 2 + # Unimodality requires lattice_size > 2. + feature_config.unimodality = 0 + + def prefitting_model_fn(features, labels, mode, config): + return _calibrated_lattice_ensemble_model_fn( + features=features, + labels=labels, + label_dimension=label_dimension, + feature_columns=feature_columns, + mode=mode, + head=head, + model_config=prefitting_model_config, + optimizer=prefitting_optimizer, + config=config, + dtype=dtype) + + config = tf.estimator.RunConfig( + keep_checkpoint_max=1, + save_summary_steps=0, + save_checkpoints_steps=10000000, + tf_random_seed=config.tf_random_seed if config is not None else 42) + logging.info('Creating the prefitting estimator.') + prefitting_estimator = tf.estimator.Estimator( + model_fn=prefitting_model_fn, config=config) + logging.info('Training the prefitting estimator.') + prefitting_estimator.train( + input_fn=prefitting_input_fn, steps=prefitting_steps) + _set_final_crystal_lattices( + feature_names=feature_names, + model_config=model_config, + prefitting_model_config=prefitting_model_config, + prefitting_estimator=prefitting_estimator) + logging.info('Finished training the prefitting estimator.') + + # Cleanup model_dir since we might be reusing it for the main estimator. + # Note that other workers are blocked until model structure file is + # generated by the chief worker, so modifying files here should be safe. 
+ remove_list = [ + os.path.join(prefitting_estimator.model_dir, 'graph.pbtxt'), + os.path.join(prefitting_estimator.model_dir, 'checkpoint'), + ] + remove_list.extend( + tf.io.gfile.glob(prefitting_estimator.latest_checkpoint() + '*')) + for file_path in remove_list: + tf.io.gfile.remove(file_path) + + +def _finalize_model_structure(model_config, label_dimension, feature_columns, + head, prefitting_input_fn, prefitting_optimizer, + prefitting_steps, model_dir, config, + warm_start_from, dtype): + """Sets up the lattice ensemble in model_config with requested algorithm.""" + if (not isinstance(model_config, configs.CalibratedLatticeEnsembleConfig) or + isinstance(model_config.lattices, list)): + return + + # TODO: If warmstarting, look for the previous ensemble file. + if warm_start_from: + raise ValueError('Warm starting lattice ensembles without explicitly ' + 'defined lattices is not supported yet.') + + if feature_columns: + feature_names = [feature_column.name for feature_column in feature_columns] + else: + feature_names = [ + feature_config.name for feature_config in model_config.feature_configs + ] + + if model_config.lattice_rank > len(feature_names): + raise ValueError( + 'lattice_rank {} cannot be larger than the number of features: {}' + .format(model_config.lattice_rank, feature_names)) + + if model_config.num_lattices * model_config.lattice_rank < len(feature_names): + raise ValueError( + 'Model with {}x{}d lattices is not large enough for all features: {}' + .format(model_config.num_lattices, model_config.lattice_rank, + feature_names)) + + ensemble_structure_filename = os.path.join(model_dir, + _ENSEMBLE_STRUCTURE_FILE) + if ((config is None or config.is_chief) and + not tf.io.gfile.exists(ensemble_structure_filename)): + if model_config.lattices == 'random': + _set_random_lattice_ensemble( + model_config=model_config, feature_names=feature_names) + elif model_config.lattices == 'crystals': + _set_crystals_lattice_ensemble( + feature_names=feature_names, + label_dimension=label_dimension, + feature_columns=feature_columns, + head=head, + model_config=model_config, + prefitting_input_fn=prefitting_input_fn, + prefitting_optimizer=prefitting_optimizer, + prefitting_steps=prefitting_steps, + config=config, + dtype=dtype) + else: + raise ValueError('Unsupported ensemble structure: {}'.format( + model_config.lattices)) + if model_config.fix_ensemble_for_2d_constraints: + _fix_ensemble_for_2d_constraints(model_config, feature_names) + + # Save lattices to file as the chief worker. + tmp_ensemble_structure_filename = ensemble_structure_filename + 'tmp' + with tf.io.gfile.GFile(tmp_ensemble_structure_filename, + 'w') as ensemble_structure_file: + ensemble_structure_file.write(json.dumps(model_config.lattices, indent=2)) + tf.io.gfile.rename(tmp_ensemble_structure_filename, + ensemble_structure_filename) + else: + # Non-chief workers read the lattices from file. 
+    _poll_for_file(ensemble_structure_filename)
+    with tf.io.gfile.GFile(
+        ensemble_structure_filename) as ensemble_structure_file:
+      model_config.lattices = json.loads(ensemble_structure_file.read())
+
+  logging.info('Finalized model structure: %s', str(model_config.lattices))
+
+
+def _verify_config(model_config, feature_columns):
+  """Verifies that the config is set up correctly and ready for model_fn."""
+  if feature_columns:
+    feature_configs = [
+        model_config.feature_config_by_name(feature_column.name)
+        for feature_column in feature_columns
+    ]
+  else:
+    feature_configs = model_config.feature_configs or []
+
+  for feature_config in feature_configs:
+    if not feature_config.num_buckets:
+      if (not np.iterable(feature_config.pwl_calibration_input_keypoints) or
+          any(not isinstance(x, float)
+              for x in feature_config.pwl_calibration_input_keypoints)):
+        raise ValueError(
+            'Input keypoints are invalid for feature {}: {}'.format(
+                feature_config.name,
+                feature_config.pwl_calibration_input_keypoints))
+
+  if (not np.iterable(model_config.output_initialization) or any(
+      not isinstance(x, float) for x in model_config.output_initialization)):
+    raise ValueError('Output initialization is invalid: {}'.format(
+        model_config.output_initialization))
+
+
+def _update_by_feature_columns(model_config, feature_columns):
+  """Updates a model config with the given feature columns."""
+  for feature_column in feature_columns or []:
+    feature_config = model_config.feature_config_by_name(feature_column.name)
+    # pylint: disable=protected-access
+    if (isinstance(feature_column, fc._DenseColumn) or
+        isinstance(feature_column, fc2.DenseColumn)):
+      feature_config.default_value = feature_column.default_value
+    elif (isinstance(feature_column, fc._VocabularyListCategoricalColumn) or
+          isinstance(feature_column, fc2.VocabularyListCategoricalColumn)):
+      feature_config.vocabulary_list = feature_column.vocabulary_list
+      feature_config.num_buckets = feature_column.num_buckets
+      if feature_column.num_oov_buckets:
+        feature_config.default_value = None
+      else:
+        # We add a bucket at the end for the default_value, since num_buckets
+        # does not include the default value (but includes oov buckets).
+        feature_config.default_value = feature_column.default_value
+        feature_config.num_buckets += 1
+    else:
+      raise ValueError('Unsupported feature_column: {}'.format(feature_column))
+    # pylint: enable=protected-access
+
+    # Change categorical monotonicities to indices.
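+    # E.g. monotonicity [('low', 'high')] with vocabulary_list
+    # ['low', 'med', 'high'] becomes [(0, 2)].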
+    if (feature_config.num_buckets and
+        isinstance(feature_config.monotonicity, list)):
+      if not feature_config.vocabulary_list:
+        raise ValueError('Vocabulary list must be provided to use categorical '
+                         'monotonicities.')
+      if not all(
+          isinstance(m, tuple) and len(m) == 2
+          for m in feature_config.monotonicity):
+        raise ValueError(
+            'Monotonicities should be a list of pairs (tuples): {}'.format(
+                feature_config.monotonicity))
+      indexed_monotonicities = []
+      index_map = {
+          category: index
+          for (index, category) in enumerate(feature_config.vocabulary_list)
+      }
+      if feature_config.default_value is not None:
+        index_map[feature_config.default_value] = feature_config.num_buckets - 1
+      for left, right in feature_config.monotonicity:
+        for category in [left, right]:
+          if category not in index_map:
+            raise ValueError(
+                'Category `{}` not found in vocabulary list for feature `{}`'
+                .format(category, feature_config.name))
+        indexed_monotonicities.append((index_map[left], index_map[right]))
+
+      feature_config.monotonicity = indexed_monotonicities
+
+
+def _input_calibration_regularizers(model_config, feature_config):
+  """Returns pwl layer regularizers defined in the model and feature configs."""
+  regularizer_configs = []
+  regularizer_configs.extend(feature_config.regularizer_configs or [])
+  regularizer_configs.extend(model_config.regularizer_configs or [])
+  return [(r.name.replace(_INPUT_CALIB_REGULARIZER_PREFIX, ''), r.l1, r.l2)
+          for r in regularizer_configs
+          if r.name.startswith(_INPUT_CALIB_REGULARIZER_PREFIX)]
+
+
+def _output_calibration_regularizers(model_config):
+  """Returns output calibration regularizers defined in the model config."""
+  return [(r.name.replace(_OUTPUT_CALIB_REGULARIZER_PREFIX, ''), r.l1, r.l2)
+          for r in model_config.regularizer_configs or []
+          if r.name.startswith(_OUTPUT_CALIB_REGULARIZER_PREFIX)]
+
+
+def _lattice_regularizers(model_config, feature_configs):
+  """Returns lattice regularizers defined in the model and feature configs."""
+  # dict from regularizer name to pair of per feature l1 and l2 amounts.
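+  # E.g. {'torsion': ([0.0, 0.1], [0.0, 0.0])} when only the second of two
+  # features specifies a torsion regularizer with l1=0.1.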
+ regularizers_dict = {} + n_dims = len(feature_configs) + for index, feature_config in enumerate(feature_configs): + for regularizer_config in feature_config.regularizer_configs or []: + if not ( + regularizer_config.name.startswith(_INPUT_CALIB_REGULARIZER_PREFIX) or + regularizer_config.name.startswith(_OUTPUT_CALIB_REGULARIZER_PREFIX)): + if regularizer_config.name not in regularizers_dict: + regularizers_dict[regularizer_config.name] = ([0.0] * n_dims, + [0.0] * n_dims) + regularizers_dict[ + regularizer_config.name][0][index] += regularizer_config.l1 + regularizers_dict[ + regularizer_config.name][1][index] += regularizer_config.l2 + + regularizers = [(k,) + v for k, v in regularizers_dict.items()] + + for regularizer_config in model_config.regularizer_configs or []: + if not ( + regularizer_config.name.startswith(_INPUT_CALIB_REGULARIZER_PREFIX) or + regularizer_config.name.startswith(_OUTPUT_CALIB_REGULARIZER_PREFIX)): + regularizers.append((regularizer_config.name, regularizer_config.l1, + regularizer_config.l2)) + return regularizers + + +class _LayerOutputRange(enum.Enum): + """Enum to indicate the output range based on the input of the next layers.""" + MODEL_OUTPUT = 1 + INPUT_TO_LATTICE = 2 + INPUT_TO_FINAL_CALIBRATION = 3 + + +def _output_range(layer_output_range, model_config, feature_config=None): + """Returns min/max/init_min/init_max for a given output range.""" + if layer_output_range == _LayerOutputRange.INPUT_TO_LATTICE: + if feature_config is None: + raise ValueError('Expecting feature config for lattice inputs.') + output_init_min = output_min = 0.0 + output_init_max = output_max = feature_config.lattice_size - 1.0 + elif layer_output_range == _LayerOutputRange.MODEL_OUTPUT: + output_min = model_config.output_min + output_max = model_config.output_max + output_init_min = np.min(model_config.output_initialization) + output_init_max = np.max(model_config.output_initialization) + elif layer_output_range == _LayerOutputRange.INPUT_TO_FINAL_CALIBRATION: + output_init_min = output_min = 0.0 + output_init_max = output_max = 1.0 + else: + raise ValueError('Unsupported layer output range.') + return output_min, output_max, output_init_min, output_init_max + + +def _input_layer(feature_configs, dtype): + """Creates a calibration layer.""" + input_layer = {} + for feature_config in feature_configs: + layer_name = '{}_{}'.format(INPUT_LAYER_NAME, feature_config.name) + if feature_config.num_buckets: + input_layer[feature_config.name] = tf.keras.Input( + shape=(1,), dtype=tf.int32, name=layer_name) + else: + input_layer[feature_config.name] = tf.keras.Input( + shape=(1,), dtype=dtype, name=layer_name) + return input_layer + + +def _calibration_layers(calibration_input_layer, feature_configs, model_config, + layer_output_range, submodels, separate_calibrators, + dtype): + """Creates a calibration layer for `submodels` as list of list of features.""" + # Create a list of (feature_name, calibration_output_idx) pairs for each + # submodel. When using shared calibration, all submodels will have + # calibration_output_idx = 0. 
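+  # E.g. submodels=[['a', 'b'], ['b', 'c']] with separate_calibrators=True
+  # yields [[('a', 0), ('b', 0)], [('b', 1), ('c', 0)]].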
+ submodels_input_features = [] + calibration_last_index = collections.defaultdict(int) + for submodel in submodels: + submodel_input_features = [] + submodels_input_features.append(submodel_input_features) + for feature_name in submodel: + submodel_input_features.append( + (feature_name, calibration_last_index[feature_name])) + if separate_calibrators: + calibration_last_index[feature_name] += 1 + + calibration_output = {} + for feature_config in feature_configs: + feature_name = feature_config.name + units = max(calibration_last_index[feature_name], 1) + calibration_input = calibration_input_layer[feature_name] + layer_name = '{}_{}'.format(CALIB_LAYER_NAME, feature_name) + + (output_min, output_max, output_init_min, + output_init_max) = _output_range(layer_output_range, model_config, + feature_config) + + if feature_config.num_buckets: + kernel_initializer = tf.compat.v1.random_uniform_initializer( + output_init_min, output_init_max) + calibrated = ( + categorical_calibration_layer.CategoricalCalibration( + num_buckets=feature_config.num_buckets, + units=units, + output_min=output_min, + output_max=output_max, + kernel_initializer=kernel_initializer, + monotonicities=feature_config.monotonicity if isinstance( + feature_config.monotonicity, list) else None, + default_input_value=feature_config.default_value, + dtype=dtype, + name=layer_name)(calibration_input)) + else: + kernel_regularizer = _input_calibration_regularizers( + model_config, feature_config) + monotonicity = feature_config.monotonicity + if (pwl_calibration_lib.canonicalize_monotonicity(monotonicity) == 0 and + feature_config.pwl_calibration_always_monotonic): + monotonicity = 1 + kernel_initializer = pwl_calibration_layer.UniformOutputInitializer( + output_min=output_init_min, + output_max=output_init_max, + monotonicity=monotonicity) + calibrated = ( + pwl_calibration_layer.PWLCalibration( + units=units, + input_keypoints=feature_config.pwl_calibration_input_keypoints, + output_min=output_min, + output_max=output_max, + clamp_min=feature_config.pwl_calibration_clamp_min, + clamp_max=feature_config.pwl_calibration_clamp_max, + missing_input_value=feature_config.default_value, + impute_missing=(feature_config.default_value is not None), + kernel_initializer=kernel_initializer, + kernel_regularizer=kernel_regularizer, + monotonicity=monotonicity, + convexity=feature_config.pwl_calibration_convexity, + dtype=dtype, + name=layer_name)(calibration_input)) + if units == 1: + calibration_output[feature_name] = [calibrated] + else: + calibration_output[feature_name] = tf.split(calibrated, units, axis=1) + + # Create passthrough nodes for each submodel input so that we can recover + # the model structure for plotting and analysis. 
+  # {CALIB_PASSTHROUGH_NAME}_{feature_name}_
+  # {calibration_output_idx}_{submodel_idx}_{submodel_input_idx}
+  submodels_inputs = []
+  for submodel_idx, submodel_input_features in enumerate(
+      submodels_input_features):
+    submodel_inputs = []
+    submodels_inputs.append(submodel_inputs)
+    for (submodel_input_idx,
+         (feature_name,
+          calibration_output_idx)) in enumerate(submodel_input_features):
+      passthrough_name = '{}_{}_{}_{}_{}'.format(CALIB_PASSTHROUGH_NAME,
+                                                 feature_name,
+                                                 calibration_output_idx,
+                                                 submodel_idx,
+                                                 submodel_input_idx)
+      submodel_inputs.append(
+          tf.identity(
+              calibration_output[feature_name][calibration_output_idx],
+              name=passthrough_name))
+
+  return submodels_inputs
+
+
+def _monotonicities_from_feature_configs(feature_configs):
+  """Returns a list of monotonicities defined in the given feature_configs."""
+  monotonicities = []
+  for feature_config in feature_configs:
+    if not feature_config.monotonicity:
+      monotonicities.append(0)
+    elif (isinstance(feature_config.monotonicity, six.string_types) and
+          feature_config.monotonicity.lower() == 'none'):
+      monotonicities.append(0)
+    else:
+      monotonicities.append(1)
+  return monotonicities
+
+
+def _dominance_constraints_from_feature_configs(feature_configs):
+  """Returns a list of dominance constraints in the given feature_configs."""
+  feature_names = [feature_config.name for feature_config in feature_configs]
+  monotonic_dominances = []
+  for dominant_idx, dominant_feature_config in enumerate(feature_configs):
+    for dominance_config in dominant_feature_config.dominates or []:
+      if dominance_config.feature_name in feature_names:
+        weak_idx = feature_names.index(dominance_config.feature_name)
+        if dominance_config.dominance_type == 'monotonic':
+          monotonic_dominances.append((dominant_idx, weak_idx))
+        else:
+          raise ValueError('Unrecognized dominance type: {}'.format(
+              dominance_config.dominance_type))
+  return monotonic_dominances
+
+
+def _linear_layer(linear_input, feature_configs, model_config, weighted_average,
+                  submodel_index, dtype):
+  """Creates a linear layer initialized to be an average."""
+  layer_name = '{}_{}'.format(LINEAR_LAYER_NAME, submodel_index)
+
+  linear_input = tf.keras.layers.Concatenate(axis=1)(linear_input)
+  num_input_dims = len(feature_configs)
+  kernel_initializer = tf.compat.v1.constant_initializer(
+      [1.0 / num_input_dims] * num_input_dims)
+  bias_initializer = tf.compat.v1.constant_initializer(0)
+
+  if weighted_average:
+    # Linear coefficients should be positive and sum to one.
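+    # This is enforced below via monotonicities=1 (non-negative coefficients)
+    # and normalization_order=1 (L1-normalized weights), with use_bias=False.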
+ linear_monotonicities = [1] * num_input_dims + normalization_order = 1 + use_bias = False + else: + linear_monotonicities = _monotonicities_from_feature_configs( + feature_configs) + normalization_order = None + use_bias = model_config.use_bias + + monotonic_dominances = _dominance_constraints_from_feature_configs( + feature_configs) + + return linear_layer.Linear( + num_input_dims=num_input_dims, + monotonicities=linear_monotonicities, + monotonic_dominances=monotonic_dominances, + use_bias=use_bias, + normalization_order=normalization_order, + kernel_initializer=kernel_initializer, + bias_initializer=bias_initializer, + dtype=dtype, + name=layer_name)( + linear_input) + + +def _lattice_layer(lattice_input, feature_configs, model_config, + layer_output_range, submodel_index, is_inside_ensemble, + dtype): + """Creates a lattice layer.""" + layer_name = '{}_{}'.format(LATTICE_LAYER_NAME, submodel_index) + + (output_min, output_max, output_init_min, + output_init_max) = _output_range(layer_output_range, model_config) + + feature_names = [feature_config.name for feature_config in feature_configs] + lattice_sizes = [ + feature_config.lattice_size for feature_config in feature_configs + ] + lattice_monotonicities = _monotonicities_from_feature_configs(feature_configs) + lattice_unimodalities = [ + feature_config.unimodality for feature_config in feature_configs + ] + lattice_regularizers = _lattice_regularizers(model_config, feature_configs) + + # Construct trust constraints within this lattice. + edgeworth_trusts = [] + trapezoid_trusts = [] + for conditional_idx, conditional_feature_config in enumerate(feature_configs): + for trust_config in conditional_feature_config.reflects_trust_in or []: + if trust_config.feature_name in feature_names: + main_idx = feature_names.index(trust_config.feature_name) + if trust_config.trust_type == 'edgeworth': + edgeworth_trusts.append( + (main_idx, conditional_idx, trust_config.direction)) + elif trust_config.trust_type == 'trapezoid': + trapezoid_trusts.append( + (main_idx, conditional_idx, trust_config.direction)) + else: + raise ValueError('Unrecognized trust type: {}'.format( + trust_config.trust_type)) + elif is_inside_ensemble and trust_config.trust_type == 'trapezoid': + logging.warning( + 'A "main" feature (%s) for a trapezoid trust constraint is not ' + 'present in a lattice that includes the "conditional" feature ' + '(%s). In an ensemble model, this can result in constraint ' + 'violations. 
Consider manually setting the ensemble structure if '
+          'this constraint needs to be satisfied.', trust_config.feature_name,
+          conditional_feature_config.name)
+
+  monotonic_dominances = _dominance_constraints_from_feature_configs(
+      feature_configs)
+
+  kernel_initializer = lattice_layer.LinearInitializer(
+      lattice_sizes=lattice_sizes,
+      monotonicities=lattice_monotonicities,
+      unimodalities=lattice_unimodalities,
+      output_min=output_init_min,
+      output_max=output_init_max)
+  return lattice_layer.Lattice(
+      lattice_sizes=lattice_sizes,
+      monotonicities=lattice_monotonicities,
+      unimodalities=lattice_unimodalities,
+      edgeworth_trusts=edgeworth_trusts,
+      trapezoid_trusts=trapezoid_trusts,
+      monotonic_dominances=monotonic_dominances,
+      output_min=output_min,
+      output_max=output_max,
+      clip_inputs=False,
+      kernel_regularizer=lattice_regularizers,
+      kernel_initializer=kernel_initializer,
+      dtype=dtype,
+      name=layer_name)(
+          lattice_input)
+
+
+def _output_calibration_layer(output_calibration_input, model_config, dtype):
+  """Creates a monotonic output calibration layer with input range [0, 1]."""
+  # Kernel format: bias followed by diffs between consecutive keypoint outputs.
+  kernel_init_values = np.ediff1d(
+      model_config.output_initialization,
+      to_begin=model_config.output_initialization[0])
+  input_keypoints = np.linspace(0.0, 1.0, num=len(kernel_init_values))
+  kernel_initializer = tf.compat.v1.constant_initializer(kernel_init_values)
+  kernel_regularizer = _output_calibration_regularizers(model_config)
+  return pwl_calibration_layer.PWLCalibration(
+      input_keypoints=input_keypoints,
+      output_min=model_config.output_min,
+      output_max=model_config.output_max,
+      kernel_initializer=kernel_initializer,
+      kernel_regularizer=kernel_regularizer,
+      monotonicity=1,
+      dtype=dtype,
+      name=OUTPUT_CALIB_LAYER_NAME)(
+          output_calibration_input)
+
+
+def _calibrated_lattice_ensemble_model_fn(features, labels, label_dimension,
+                                          feature_columns, mode, head,
+                                          model_config, optimizer, config,
+                                          dtype):
+  """Calibrated Lattice Ensemble Model."""
+  del config
+  if label_dimension != 1:
+    raise ValueError('Only 1-dimensional output is supported.')
+
+  # Get input tensors and corresponding feature configs.
+  transformed_features = transform_features(features, feature_columns)
+  feature_names = list(transformed_features.keys())
+  feature_configs = [
+      model_config.feature_config_by_name(feature_name)
+      for feature_name in feature_names
+  ]
+  input_layer = _input_layer(feature_configs=feature_configs, dtype=dtype)
+
+  submodels_inputs = _calibration_layers(
+      calibration_input_layer=input_layer,
+      feature_configs=feature_configs,
+      model_config=model_config,
+      layer_output_range=_LayerOutputRange.INPUT_TO_LATTICE,
+      submodels=model_config.lattices,
+      separate_calibrators=model_config.separate_calibrators,
+      dtype=dtype)
+
+  lattice_outputs = []
+  for submodel_index, (lattice_feature_names, lattice_input) in enumerate(
+      zip(model_config.lattices, submodels_inputs)):
+    lattice_feature_configs = [
+        model_config.feature_config_by_name(feature_name)
+        for feature_name in lattice_feature_names
+    ]
+
+    lattice_layer_output_range = (
+        _LayerOutputRange.INPUT_TO_FINAL_CALIBRATION
+        if model_config.output_calibration else _LayerOutputRange.MODEL_OUTPUT)
+    lattice_outputs.append(
+        _lattice_layer(
+            lattice_input=lattice_input,
+            feature_configs=lattice_feature_configs,
+            model_config=model_config,
+            layer_output_range=lattice_layer_output_range,
+            submodel_index=submodel_index,
+            is_inside_ensemble=True,
+            dtype=dtype))
+
+  averaged_lattice_output = tf.keras.layers.Average()(lattice_outputs)
+  if model_config.output_calibration:
+    model_output = _output_calibration_layer(
+        output_calibration_input=averaged_lattice_output,
+        model_config=model_config,
+        dtype=dtype)
+  else:
+    model_output = averaged_lattice_output
+
+  input_tensors = [
+      transformed_features[feature_name] for feature_name in feature_names
+  ]
+  inputs = [input_layer[feature_name] for feature_name in feature_names]
+  training = (mode == tf.estimator.ModeKeys.TRAIN)
+  model = tf.keras.Model(inputs=inputs, outputs=model_output)
+  logits = tf.identity(
+      model(input_tensors, training=training), name=OUTPUT_NAME)
+
+  if training:
+    optimizer = optimizers.get_optimizer_instance_v2(optimizer)
+    optimizer.iterations = training_util.get_or_create_global_step()
+  else:
+    optimizer = None
+
+  return head.create_estimator_spec(
+      features=features,
+      mode=mode,
+      labels=labels,
+      optimizer=optimizer,
+      logits=logits,
+      trainable_variables=model.trainable_variables,
+      update_ops=model.updates,
+      regularization_losses=model.losses or None)
+
+
+def _calibrated_lattice_model_fn(features, labels, label_dimension,
+                                 feature_columns, mode, head, model_config,
+                                 optimizer, config, dtype):
+  """Calibrated Lattice Model."""
+  del config
+  if label_dimension != 1:
+    raise ValueError('Only 1-dimensional output is supported.')
+
+  # Get input tensors and corresponding feature configs.
+  transformed_features = transform_features(features, feature_columns)
+  feature_names = list(transformed_features.keys())
+  feature_configs = [
+      model_config.feature_config_by_name(feature_name)
+      for feature_name in feature_names
+  ]
+  input_layer = _input_layer(feature_configs=feature_configs, dtype=dtype)
+  submodels_inputs = _calibration_layers(
+      calibration_input_layer=input_layer,
+      feature_configs=feature_configs,
+      model_config=model_config,
+      layer_output_range=_LayerOutputRange.INPUT_TO_LATTICE,
+      submodels=[[feature_column.name for feature_column in feature_columns]],
+      separate_calibrators=False,
+      dtype=dtype)
+
+  lattice_layer_output_range = (
+      _LayerOutputRange.INPUT_TO_FINAL_CALIBRATION
+      if model_config.output_calibration else _LayerOutputRange.MODEL_OUTPUT)
+  lattice_output = _lattice_layer(
+      lattice_input=submodels_inputs[0],
+      feature_configs=feature_configs,
+      model_config=model_config,
+      layer_output_range=lattice_layer_output_range,
+      submodel_index=0,
+      is_inside_ensemble=False,
+      dtype=dtype)
+
+  if model_config.output_calibration:
+    model_output = _output_calibration_layer(
+        output_calibration_input=lattice_output,
+        model_config=model_config,
+        dtype=dtype)
+  else:
+    model_output = lattice_output
+
+  input_tensors = [
+      transformed_features[feature_name] for feature_name in feature_names
+  ]
+  inputs = [input_layer[feature_name] for feature_name in feature_names]
+  training = (mode == tf.estimator.ModeKeys.TRAIN)
+  model = tf.keras.Model(inputs=inputs, outputs=model_output)
+  logits = tf.identity(
+      model(input_tensors, training=training), name=OUTPUT_NAME)
+
+  if training:
+    optimizer = optimizers.get_optimizer_instance_v2(optimizer)
+    optimizer.iterations = training_util.get_or_create_global_step()
+
+  return head.create_estimator_spec(
+      features=features,
+      mode=mode,
+      labels=labels,
+      optimizer=optimizer,
+      logits=logits,
+      trainable_variables=model.trainable_variables,
+      update_ops=model.updates,
+      regularization_losses=model.losses or None)
+
+
+def _calibrated_linear_model_fn(features, labels, label_dimension,
+                                feature_columns, mode, head, model_config,
+                                optimizer, config, dtype):
+  """Calibrated Linear Model."""
+  del config
+  if label_dimension != 1:
+    raise ValueError('Only 1-dimensional output is supported.')
+
+  # Get input tensors and corresponding feature configs.
+ transformed_features = transform_features(features, feature_columns) + feature_names = list(transformed_features.keys()) + feature_configs = [ + model_config.feature_config_by_name(feature_name) + for feature_name in feature_names + ] + input_layer = _input_layer(feature_configs=feature_configs, dtype=dtype) + + calibration_layer_output_range = ( + _LayerOutputRange.INPUT_TO_FINAL_CALIBRATION + if model_config.output_calibration else _LayerOutputRange.MODEL_OUTPUT) + submodels_inputs = _calibration_layers( + calibration_input_layer=input_layer, + feature_configs=feature_configs, + model_config=model_config, + layer_output_range=calibration_layer_output_range, + submodels=[[feature_column.name for feature_column in feature_columns]], + separate_calibrators=False, + dtype=dtype) + + weighted_average = ( + model_config.output_min is not None or + model_config.output_max is not None or model_config.output_calibration) + linear_output = _linear_layer( + linear_input=submodels_inputs[0], + feature_configs=feature_configs, + model_config=model_config, + weighted_average=weighted_average, + submodel_index=0, + dtype=dtype) + + if model_config.output_calibration: + model_output = _output_calibration_layer( + output_calibration_input=linear_output, + model_config=model_config, + dtype=dtype) + else: + model_output = linear_output + + input_tensors = [ + transformed_features[feature_name] for feature_name in feature_names + ] + inputs = [input_layer[feature_name] for feature_name in feature_names] + training = (mode == tf.estimator.ModeKeys.TRAIN) + model = tf.keras.Model(inputs=inputs, outputs=model_output) + logits = tf.identity( + model(input_tensors, training=training), name=OUTPUT_NAME) + + if training: + optimizer = optimizers.get_optimizer_instance_v2(optimizer) + optimizer.iterations = training_util.get_or_create_global_step() + + return head.create_estimator_spec( + features=features, + mode=mode, + labels=labels, + optimizer=optimizer, + logits=logits, + trainable_variables=model.trainable_variables, + update_ops=model.updates, + regularization_losses=model.losses or None) + + +def _get_model_fn(label_dimension, feature_columns, head, model_config, + optimizer, dtype): + """Returns the model_fn for the given model_config.""" + if isinstance(model_config, configs.CalibratedLatticeConfig): + + def calibrated_lattice_model_fn(features, labels, mode, config): + return _calibrated_lattice_model_fn( + features=features, + labels=labels, + label_dimension=label_dimension, + feature_columns=feature_columns, + mode=mode, + head=head, + model_config=model_config, + optimizer=optimizer, + config=config, + dtype=dtype) + + return calibrated_lattice_model_fn + elif isinstance(model_config, configs.CalibratedLinearConfig): + + def calibrated_linear_model_fn(features, labels, mode, config): + return _calibrated_linear_model_fn( + features=features, + labels=labels, + label_dimension=label_dimension, + feature_columns=feature_columns, + mode=mode, + head=head, + model_config=model_config, + optimizer=optimizer, + config=config, + dtype=dtype) + + return calibrated_linear_model_fn + if isinstance(model_config, configs.CalibratedLatticeEnsembleConfig): + + def calibrated_lattice_ensemble_model_fn(features, labels, mode, config): + return _calibrated_lattice_ensemble_model_fn( + features=features, + labels=labels, + label_dimension=label_dimension, + feature_columns=feature_columns, + mode=mode, + head=head, + model_config=model_config, + optimizer=optimizer, + config=config, + dtype=dtype) + + return 
calibrated_lattice_ensemble_model_fn
+  else:
+    raise ValueError('Unsupported model type: {}'.format(type(model_config)))
+
+
+class CannedEstimator(estimator_lib.EstimatorV2):
+  """An estimator for TensorFlow lattice models.
+
+  Creates an estimator with a custom head for the model architecture specified
+  by the `model_config`, which should be one of those defined in `tfl.configs`.
+  Calculation of feature quantiles for input keypoint initialization is done
+  using `feature_analysis_input_fn`. If this auxiliary input fn is not
+  provided, all keypoint values should be explicitly provided via the
+  `model_config`.
+
+  Example:
+
+  ```python
+  model_config = tfl.configs.CalibratedLatticeConfig(...)
+  feature_analysis_input_fn = create_input_fn(num_epochs=1, ...)
+  train_input_fn = create_input_fn(num_epochs=100, ...)
+  head = ...
+  estimator = tfl.estimators.CannedEstimator(
+      feature_columns=feature_columns,
+      model_config=model_config,
+      feature_analysis_input_fn=feature_analysis_input_fn,
+      head=head)
+  estimator.train(input_fn=train_input_fn)
+  ```
+  """
+
+  def __init__(self,
+               head,
+               model_config,
+               feature_columns,
+               feature_analysis_input_fn=None,
+               prefitting_input_fn=None,
+               model_dir=None,
+               label_dimension=1,
+               optimizer='Adagrad',
+               prefitting_optimizer='Adagrad',
+               prefitting_steps=None,
+               config=None,
+               warm_start_from=None,
+               dtype=tf.float32):
+    """Initializes a `CannedEstimator` instance.
+
+    Args:
+      head: A `_Head` instance constructed with a method such as
+        `tf.contrib.estimator.multi_label_head`.
+      model_config: Model configuration object describing model architecture.
+        Should be one of the model configs in `tfl.configs`.
+      feature_columns: An iterable containing all the feature columns used by
+        the model.
+      feature_analysis_input_fn: An input_fn used to calculate statistics about
+        features and labels in order to set up calibration keypoints and
+        values.
+      prefitting_input_fn: An input_fn used in the prefitting stage to estimate
+        non-linear feature interactions. Required for crystals models.
+        Prefitting typically uses the same dataset as the main training, but
+        with fewer epochs.
+      model_dir: Directory to save model parameters, graph, etc. This can also
+        be used to load checkpoints from the directory into an estimator to
+        continue training a previously saved model.
+      label_dimension: Number of regression targets per example. This is the
+        size of the last dimension of the labels and logits `Tensor` objects
+        (typically, these have shape `[batch_size, label_dimension]`).
+      optimizer: An instance of `tf.Optimizer` used to train the model. Can
+        also be a string (one of 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'),
+        or callable. Defaults to Adagrad optimizer.
+      prefitting_optimizer: An instance of `tf.Optimizer` used to train the
+        model during the prefitting stage. Can also be a string (one of
+        'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to
+        Adagrad optimizer.
+      prefitting_steps: Number of steps for which to train the model during the
+        prefitting stage. If None, train forever or train until
+        prefitting_input_fn generates the tf.errors.OutOfRange error or
+        StopIteration exception.
+      config: `RunConfig` object to configure the runtime settings.
+      warm_start_from: A string filepath to a checkpoint to warm-start from, or
+        a `WarmStartSettings` object to fully configure warm-starting. If the
+        string filepath is provided instead of a `WarmStartSettings`, then all
+        weights are warm-started, and it is assumed that vocabularies and
+        Tensor names are unchanged.
+      dtype: dtype of layers used in the model.
+    """
+    config = estimator_lib.maybe_overwrite_model_dir_and_session_config(
+        config, model_dir)
+    model_dir = config.model_dir
+
+    model_config = copy.deepcopy(model_config)
+    _update_by_feature_columns(model_config, feature_columns)
+
+    _finalize_keypoints(
+        model_config=model_config,
+        config=config,
+        feature_columns=feature_columns,
+        feature_analysis_input_fn=feature_analysis_input_fn,
+        logits_output=True)
+
+    _verify_config(model_config, feature_columns)
+
+    _finalize_model_structure(
+        label_dimension=label_dimension,
+        feature_columns=feature_columns,
+        head=head,
+        model_config=model_config,
+        prefitting_input_fn=prefitting_input_fn,
+        prefitting_optimizer=prefitting_optimizer,
+        prefitting_steps=prefitting_steps,
+        model_dir=model_dir,
+        config=config,
+        warm_start_from=warm_start_from,
+        dtype=dtype)
+
+    model_fn = _get_model_fn(
+        label_dimension=label_dimension,
+        feature_columns=feature_columns,
+        head=head,
+        model_config=model_config,
+        optimizer=optimizer,
+        dtype=dtype)
+
+    super(CannedEstimator, self).__init__(
+        model_fn=model_fn,
+        model_dir=model_dir,
+        config=config,
+        warm_start_from=warm_start_from)
+
+
+class CannedClassifier(estimator_lib.EstimatorV2):
+  """Canned classifier for TensorFlow lattice models.
+
+  Creates a classifier for the model architecture specified by the
+  `model_config`, which should be one of those defined in `tfl.configs`.
+  Calculation of feature quantiles for input keypoint initialization is done
+  using `feature_analysis_input_fn`. If this auxiliary input fn is not
+  provided, all keypoint values should be explicitly provided via the
+  `model_config`.
+
+  Training loss is softmax cross-entropy as defined for the default
+  TF classification head.
+
+  Example:
+
+  ```python
+  model_config = tfl.configs.CalibratedLatticeConfig(...)
+  feature_analysis_input_fn = create_input_fn(num_epochs=1, ...)
+  train_input_fn = create_input_fn(num_epochs=100, ...)
+  estimator = tfl.estimators.CannedClassifier(
+      feature_columns=feature_columns,
+      model_config=model_config,
+      feature_analysis_input_fn=feature_analysis_input_fn)
+  estimator.train(input_fn=train_input_fn)
+  ```
+  """
+
+  def __init__(self,
+               model_config,
+               feature_columns,
+               feature_analysis_input_fn=None,
+               prefitting_input_fn=None,
+               model_dir=None,
+               n_classes=2,
+               weight_column=None,
+               label_vocabulary=None,
+               optimizer='Adagrad',
+               prefitting_optimizer='Adagrad',
+               prefitting_steps=None,
+               config=None,
+               warm_start_from=None,
+               loss_reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,
+               dtype=tf.float32):
+    """Initializes a `CannedClassifier` instance.
+
+    Args:
+      model_config: Model configuration object describing model architecture.
+        Should be one of the model configs in `tfl.configs`.
+      feature_columns: An iterable containing all the feature columns used by
+        the model.
+      feature_analysis_input_fn: An input_fn used to calculate statistics about
+        features and labels in order to set up calibration keypoints and
+        values.
+      prefitting_input_fn: An input_fn used in the prefitting stage to estimate
+        non-linear feature interactions. Required for crystals models.
+        Prefitting typically uses the same dataset as the main training, but
+        with fewer epochs.
+      model_dir: Directory to save model parameters, graph, etc. This can also
+        be used to load checkpoints from the directory into an estimator to
+        continue training a previously saved model.
+      n_classes: Number of label classes. Defaults to 2, namely binary
+        classification. Must be > 1.
+      weight_column: A string or a `_NumericColumn` created by
+        `tf.feature_column.numeric_column` defining the feature column
+        representing weights. It is used to down-weight or boost examples
+        during training, and it will be multiplied by the loss of the example.
+        If it is a string, it is used as a key to fetch the weight tensor from
+        the `features`. If it is a `_NumericColumn`, the raw tensor is fetched
+        by key `weight_column.key`, then weight_column.normalizer_fn is applied
+        on it to get the weight tensor.
+      label_vocabulary: A list of strings representing possible label values.
+        If given, labels must be of string type and have any value in
+        `label_vocabulary`. If it is not given, labels are assumed to be
+        already encoded as integer or float within [0, 1] for `n_classes=2`,
+        and encoded as integer values in {0, 1, ..., n_classes-1} for
+        `n_classes` > 2. An error is raised if the vocabulary is not provided
+        and labels are strings.
+      optimizer: An instance of `tf.Optimizer` used to train the model. Can
+        also be a string (one of 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'),
+        or callable. Defaults to Adagrad optimizer.
+      prefitting_optimizer: An instance of `tf.Optimizer` used to train the
+        model during the prefitting stage. Can also be a string (one of
+        'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to
+        Adagrad optimizer.
+      prefitting_steps: Number of steps for which to train the model during the
+        prefitting stage. If None, train forever or train until
+        prefitting_input_fn generates the tf.errors.OutOfRange error or
+        StopIteration exception.
+      config: `RunConfig` object to configure the runtime settings.
+      warm_start_from: A string filepath to a checkpoint to warm-start from, or
+        a `WarmStartSettings` object to fully configure warm-starting. If the
+        string filepath is provided instead of a `WarmStartSettings`, then all
+        weights are warm-started, and it is assumed that vocabularies and
+        Tensor names are unchanged.
+      loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
+        to reduce training loss over batch. Defaults to `SUM_OVER_BATCH_SIZE`.
+      dtype: dtype of layers used in the model.
+    """ + config = estimator_lib.maybe_overwrite_model_dir_and_session_config( + config, model_dir) + model_dir = config.model_dir + head = head_utils.binary_or_multi_class_head( + n_classes=n_classes, + weight_column=weight_column, + label_vocabulary=label_vocabulary, + loss_reduction=loss_reduction) + label_dimension = 1 if n_classes == 2 else n_classes + + model_config = copy.deepcopy(model_config) + _update_by_feature_columns(model_config, feature_columns) + + _finalize_keypoints( + model_config=model_config, + config=config, + feature_columns=feature_columns, + feature_analysis_input_fn=feature_analysis_input_fn, + logits_output=True) + + _verify_config(model_config, feature_columns) + + _finalize_model_structure( + label_dimension=label_dimension, + feature_columns=feature_columns, + head=head, + model_config=model_config, + prefitting_input_fn=prefitting_input_fn, + prefitting_optimizer=prefitting_optimizer, + prefitting_steps=prefitting_steps, + model_dir=model_dir, + config=config, + warm_start_from=warm_start_from, + dtype=dtype) + + model_fn = _get_model_fn( + label_dimension=label_dimension, + feature_columns=feature_columns, + head=head, + model_config=model_config, + optimizer=optimizer, + dtype=dtype) + + super(CannedClassifier, self).__init__( + model_fn=model_fn, + model_dir=model_dir, + config=config, + warm_start_from=warm_start_from) + + +class CannedRegressor(estimator_lib.EstimatorV2): + """A regressor for TensorFlow lattice models. + + Creates a regressor for the model architecture specified by the + `model_config`, which should be one of those defined in `tfl.configs`. + Calculation of feature quantiles for input keypoint initialization is done + using `feature_analysis_input_fn`. If this auxiliary input fn is not provided, + all keypoint values should be explicitly provided via the `model_config`. + + Training loss is squared error as defined for the default TF regression head. + + Example: + + ```python + model_config = tfl.configs.CalibratedLatticeConfig(...) + feature_analysis_input_fn = create_input_fn(num_epochs=1, ...) + train_input_fn = create_input_fn(num_epochs=100, ...) + estimator = tfl.estimators.CannedRegressor( + feature_columns=feature_columns, + model_config=model_config, + feature_analysis_input_fn=feature_analysis_input_fn) + estimator.train(input_fn=train_input_fn) + ``` + """ + + def __init__(self, + model_config, + feature_columns, + feature_analysis_input_fn=None, + prefitting_input_fn=None, + model_dir=None, + label_dimension=1, + weight_column=None, + optimizer='Adagrad', + prefitting_optimizer='Adagrad', + prefitting_steps=None, + config=None, + warm_start_from=None, + loss_reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE, + dtype=tf.float32): + """Initializes a `CannedRegressor` instance. + + Args: + model_config: Model configuration object describing model architecture. + Should be one of the model configs in `tfl.configs`. + feature_columns: An iterable containing all the feature columns used by + the model. + feature_analysis_input_fn: An input_fn used to calculate statistics about + features and labels in order to set up calibration keypoints and values. + prefitting_input_fn: An input_fn used in the pre-fitting stage to estimate + non-linear feature interactions. Required for crystals models. + Prefitting typically uses the same dataset as the main training, but + with fewer epochs. + model_dir: Directory to save model parameters, graph, etc.
This can + also be used to load checkpoints from the directory into an estimator to + continue training a previously saved model. + label_dimension: Number of regression targets per example. This is the + size of the last dimension of the labels and logits `Tensor` objects + (typically, these have shape `[batch_size, label_dimension]`). + weight_column: A string or a `_NumericColumn` created by + `tf.feature_column.numeric_column` defining feature column representing + weights. It is used to down weight or boost examples during training. It + will be multiplied by the loss of the example. If it is a string, it is + used as a key to fetch weight tensor from the `features`. If it is a + `_NumericColumn`, raw tensor is fetched by key `weight_column.key`, then + weight_column.normalizer_fn is applied on it to get weight tensor. + optimizer: An instance of `tf.Optimizer` used to train the model. Can also + be a string (one of 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or + callable. Defaults to Adagrad optimizer. + prefitting_optimizer: An instance of `tf.Optimizer` used to train the + model during the pre-fitting stage. Can also be a string (one of + 'Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD'), or callable. Defaults to + Adagrad optimizer. + prefitting_steps: Number of steps for which to pretrain the model + during the prefitting stage. If None, train forever or train until + prefitting_input_fn generates the tf.errors.OutOfRange error or + StopIteration exception. + config: `RunConfig` object to configure the runtime settings. + warm_start_from: A string filepath to a checkpoint to warm-start from, or + a `WarmStartSettings` object to fully configure warm-starting. If the + string filepath is provided instead of a `WarmStartSettings`, then all + weights are warm-started, and it is assumed that vocabularies and Tensor + names are unchanged. + loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how + to reduce training loss over batch. Defaults to `SUM_OVER_BATCH_SIZE`. + dtype: dtype of layers used in the model.
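+
+    Example of a multi-target regressor (an illustrative sketch only;
+    `create_input_fn` is an assumed helper, as in the class docstring):
+
+    ```python
+    estimator = tfl.estimators.CannedRegressor(
+        feature_columns=feature_columns,
+        model_config=tfl.configs.CalibratedLatticeConfig(...),
+        label_dimension=2,
+        feature_analysis_input_fn=create_input_fn(num_epochs=1, ...))
+    ```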
+    """ + config = estimator_lib.maybe_overwrite_model_dir_and_session_config( + config, model_dir) + model_dir = config.model_dir + head = regression_head.RegressionHead( + label_dimension=label_dimension, + weight_column=weight_column, + loss_reduction=loss_reduction) + + model_config = copy.deepcopy(model_config) + _update_by_feature_columns(model_config, feature_columns) + + _finalize_keypoints( + model_config=model_config, + config=config, + feature_columns=feature_columns, + feature_analysis_input_fn=feature_analysis_input_fn, + logits_output=True) + + _verify_config(model_config, feature_columns) + + _finalize_model_structure( + label_dimension=label_dimension, + feature_columns=feature_columns, + head=head, + model_config=model_config, + prefitting_input_fn=prefitting_input_fn, + prefitting_optimizer=prefitting_optimizer, + prefitting_steps=prefitting_steps, + model_dir=model_dir, + config=config, + warm_start_from=warm_start_from, + dtype=dtype) + + model_fn = _get_model_fn( + label_dimension=label_dimension, + feature_columns=feature_columns, + head=head, + model_config=model_config, + optimizer=optimizer, + dtype=dtype) + + super(CannedRegressor, self).__init__( + model_fn=model_fn, + model_dir=model_dir, + config=config, + warm_start_from=warm_start_from) + + +def _match_op(ops, regex): + """Returns ops that match given regex along with the matched sections.""" + matches = [] + for op in ops: + op_matches = re.findall(regex, op) + if op_matches: + matches.append((op, op_matches[0])) + return matches + + +def get_model_graph(saved_model_path, tag='serve'): + """Returns all layers and parameters used in a saved model as a graph. + + The returned graph is not a TF graph, but rather a graph of Python objects + that encodes the model structure and includes trained model parameters. The + graph can be used by the `tfl.visualization` module for plotting and other + visualization and analysis. + + Example: + + ```python + model_graph = estimators.get_model_graph(saved_model_path) + visualization.plot_feature_calibrator(model_graph, "feature_name") + visualization.plot_all_calibrators(model_graph) + visualization.draw_model_graph(model_graph) + ``` + + Args: + saved_model_path: Path to the saved model. + tag: Saved model tag for loading. + + Returns: + A `model_info.ModelGraph` object that includes the model graph. + """ + # List of all the nodes in the model. + nodes = [] + + # Dict from feature name to corresponding InputFeatureNode object. + feature_nodes = {} + + # Dict from submodel index to a list of calibrated inputs for the submodel. + submodel_input_nodes = collections.defaultdict(list) + + # Dict from submodel index to the output node of the submodel. + submodel_output_nodes = {} + + tf.compat.v1.reset_default_graph() + with tf.compat.v1.Session() as sess: + tf.compat.v1.saved_model.loader.load(sess, [tag], saved_model_path) + g = tf.compat.v1.get_default_graph() + ops = [op.name for op in g.get_operations()] + + ############################# + # Create input feature nodes. + ############################# + + # Extract list of features from the graph. + # {FEATURES_SCOPE}/{feature_name} + feature_op_re = '{}/(.*)'.format(FEATURES_SCOPE) + for (_, feature_name) in _match_op(ops, feature_op_re): + category_table_op = 'transform/{}_lookup/Const'.format(feature_name) + if category_table_op in ops: + is_categorical = True + vocabulary_list = sess.run( + g.get_operation_by_name(category_table_op).outputs[0]) + # Replace byte types with their string values.
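+        # (Vocabulary entries read back from a SavedModel typically come
+        # back as bytes objects; decode those, and stringify any remaining
+        # non-bytes values.)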
+          vocabulary_list = [ + str(x.decode()) if isinstance(x, bytes) else str(x) + for x in vocabulary_list + ] + else: + is_categorical = False + vocabulary_list = None + + feature_node = model_info.InputFeatureNode( + name=feature_name, + is_categorical=is_categorical, + vocabulary_list=vocabulary_list) + feature_nodes[feature_name] = feature_node + nodes.append(feature_node) + + ####################################### + # Create categorical calibration nodes. + ####################################### + + # Get calibrator output values. We need to call the read variable op. + # {CALIB_LAYER_NAME}_{feature_name}/ + # {CATEGORICAL_CALIBRATION_KERNEL_NAME}/Read/ReadVariableOp + kernel_op_re = '^{}_(.*)/{}/Read/ReadVariableOp$'.format( + CALIB_LAYER_NAME, + categorical_calibration_layer.CATEGORICAL_CALIBRATION_KERNEL_NAME, + ) + for kernel_op, feature_name in _match_op(ops, kernel_op_re): + output_values = sess.run(g.get_operation_by_name(kernel_op).outputs[0]) + + # Get default input value if defined. + # {CALIB_LAYER_NAME}_{feature_name}/ + # {DEFAULT_INPUT_VALUE_NAME} + default_input_value_op = '^{}_{}/{}$'.format( + CALIB_LAYER_NAME, + feature_name, + categorical_calibration_layer.DEFAULT_INPUT_VALUE_NAME, + ) + if default_input_value_op in ops: + default_input = sess.run( + g.get_operation_by_name(default_input_value_op).outputs[0]) + else: + default_input = None + + # Create one calibration node per output dimension of the calibrator. + categorical_calibration_nodes = [] + for calibration_output_idx in range(output_values.shape[1]): + categorical_calibration_node = model_info.CategoricalCalibrationNode( + input_node=feature_nodes[feature_name], + output_values=output_values[:, calibration_output_idx], + default_input=default_input) + categorical_calibration_nodes.append(categorical_calibration_node) + nodes.append(categorical_calibration_node) + + # Identity passthrough ops that pass this calibration to each submodel. + # {CALIB_PASSTHROUGH_NAME}_{feature_name}_ + # {calibration_output_idx}_{submodel_idx}_{submodel_input_idx} + shared_calib_passthrough_op_re = r'^{}_{}_(\d*)_(\d*)_(\d*)$'.format( + CALIB_PASSTHROUGH_NAME, feature_name) + for op, (calibration_output_idx, submodel_idx, + submodel_input_idx) in _match_op(ops, + shared_calib_passthrough_op_re): + submodel_input_nodes[submodel_idx].append( + (submodel_input_idx, + categorical_calibration_nodes[int(calibration_output_idx)])) + + ############################### + # Create PWL calibration nodes. + ############################### + + # Calculate input keypoints. + # We extract lengths (deltas between keypoints) and kernel interpolation + # keypoints (which do not include the last keypoint), and then + # construct the full keypoints list using both. + + # Lengths (deltas between keypoints). + # {CALIB_LAYER_NAME}_{feature_name}/{LENGTHS_NAME} + lengths_op_re = '^{}_(.*)/{}$'.format( + CALIB_LAYER_NAME, + pwl_calibration_layer.LENGTHS_NAME, + ) + for lengths_op, feature_name in _match_op(ops, lengths_op_re): + # Interpolation keypoints do not include the last input keypoint. + # {CALIB_LAYER_NAME}_{feature_name}/{INTERPOLATION_KEYPOINTS_NAME} + keypoints_op = '{}_{}/{}'.format( + CALIB_LAYER_NAME, + feature_name, + pwl_calibration_layer.INTERPOLATION_KEYPOINTS_NAME, + ) + + # Output keypoints. We need to call the variable read op.
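+      # (The calibration kernel stores the first output keypoint followed by
+      # the deltas between consecutive output keypoints; the cumulative sum
+      # taken below therefore recovers the full list of output keypoints.)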
+ # {CALIB_LAYER_NAME}_{feature_name}/{PWL_CALIBRATION_KERNEL_NAME} + kernel_op = '{}_{}/{}/Read/ReadVariableOp'.format( + CALIB_LAYER_NAME, + feature_name, + pwl_calibration_layer.PWL_CALIBRATION_KERNEL_NAME, + ) + + (lengths, keypoints, kernel) = sess.run( + (g.get_operation_by_name(lengths_op).outputs[0], + g.get_operation_by_name(keypoints_op).outputs[0], + g.get_operation_by_name(kernel_op).outputs[0])) + output_keypoints = np.cumsum(kernel, axis=0) + + # Add the last keypoint to the keypoint list. + # TODO: handle cyclic PWL layers. + input_keypoints = np.append(keypoints, keypoints[-1] + lengths[-1]) + + # Get missing/default input value if present: + # {CALIB_LAYER_NAME}_{feature_name}/{MISSING_INPUT_VALUE_NAME} + default_input_value_op = '{}_{}/{}'.format( + CALIB_LAYER_NAME, + feature_name, + pwl_calibration_layer.MISSING_INPUT_VALUE_NAME, + ) + if default_input_value_op in ops: + default_input = sess.run( + g.get_operation_by_name(default_input_value_op).outputs[0])[0] + else: + default_input = None + + # Find corresponding default/missing output if present. + # {CALIB_LAYER_NAME}_{feature_name}/{PWL_CALIBRATION_MISSING_OUTPUT_NAME} + default_output_op = '{}_{}/{}/Read/ReadVariableOp'.format( + CALIB_LAYER_NAME, + feature_name, + pwl_calibration_layer.PWL_CALIBRATION_MISSING_OUTPUT_NAME, + ) + if default_output_op in ops: + default_output = sess.run( + g.get_operation_by_name(default_output_op).outputs[0]) + else: + default_output = None + + # Create one calibration node per output dimension of the calibrator. + pwl_calibration_nodes = [] + for calibration_output_idx in range(output_keypoints.shape[1]): + pwl_calibration_node = model_info.PWLCalibrationNode( + input_node=feature_nodes[feature_name], + input_keypoints=input_keypoints, + output_keypoints=output_keypoints[:, calibration_output_idx], + default_input=default_input, + default_output=(None if default_output is None else + default_output[:, calibration_output_idx])) + pwl_calibration_nodes.append(pwl_calibration_node) + nodes.append(pwl_calibration_node) + + # Identity passthrough ops that pass this calibration to each submodel. + # {CALIB_PASSTHROUGH_NAME}_{feature_name}_ + # {calibration_output_idx}_{submodel_idx}_{submodel_input_idx} + shared_calib_passthrough_op_re = r'^{}_{}_(\d*)_(\d*)_(\d*)$'.format( + CALIB_PASSTHROUGH_NAME, feature_name) + for op, (calibration_output_idx, submodel_idx, + submodel_input_idx) in _match_op(ops, + shared_calib_passthrough_op_re): + submodel_input_nodes[submodel_idx].append( + (submodel_input_idx, + pwl_calibration_nodes[int(calibration_output_idx)])) + + ###################### + # Create linear nodes. + ###################### + + # Linear coefficients. + # {LINEAR_LAYER_NAME}_{submodel_idx}/{LINEAR_LAYER_KERNEL_NAME} + linear_kernel_op_re = '^{}_(.*)/{}/Read/ReadVariableOp$'.format( + LINEAR_LAYER_NAME, + linear_layer.LINEAR_LAYER_KERNEL_NAME, + ) + for linear_kernel_op, submodel_idx in _match_op(ops, linear_kernel_op_re): + coefficients = sess.run( + g.get_operation_by_name(linear_kernel_op).outputs[0]).flatten() + + # Bias term. + # {LINEAR_LAYER_NAME}/{LINEAR_LAYER_BIAS_NAME} + bias_op = '{}/{}/Read/ReadVariableOp'.format( + LINEAR_LAYER_NAME, + linear_layer.LINEAR_LAYER_BIAS_NAME, + ) + if bias_op in ops: + bias = sess.run(g.get_operation_by_name(bias_op).outputs[0]) + else: + bias = 0.0 + + # Sort input nodes by input index. 
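+      # (Each entry of submodel_input_nodes is a (submodel_input_idx, node)
+      # pair, so sorting the pairs orders the calibrated inputs by their
+      # position within the submodel.)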
+      input_nodes = [ + node for _, node in sorted(submodel_input_nodes[submodel_idx]) + ] + + linear_node = model_info.LinearNode( + input_nodes=input_nodes, coefficients=coefficients, bias=bias) + submodel_output_nodes[submodel_idx] = linear_node + nodes.append(linear_node) + + ####################### + # Create lattice nodes. + ####################### + + # Lattice weights. + # {LATTICE_LAYER_NAME}_{submodel_idx}/{LATTICE_KERNEL_NAME} + lattice_kernel_op_re = '^{}_(.*)/{}/Read/ReadVariableOp$'.format( + LATTICE_LAYER_NAME, + lattice_layer.LATTICE_KERNEL_NAME, + ) + for lattice_kernel_op, submodel_idx in _match_op(ops, lattice_kernel_op_re): + lattice_kernel = sess.run( + g.get_operation_by_name(lattice_kernel_op).outputs[0]).flatten() + + # Lattice sizes. + # {LATTICE_LAYER_NAME}_{submodel_idx}/{LATTICE_SIZES_NAME} + lattice_sizes_op_name = '{}_{}/{}'.format( + LATTICE_LAYER_NAME, submodel_idx, lattice_layer.LATTICE_SIZES_NAME) + lattice_sizes = sess.run( + g.get_operation_by_name(lattice_sizes_op_name).outputs[0]).flatten() + + # Shape the flat lattice parameters based on the calculated lattice sizes. + weights = np.reshape(lattice_kernel, lattice_sizes) + + # Sort input nodes by input index. + input_nodes = [ + node for _, node in sorted(submodel_input_nodes[submodel_idx]) + ] + + lattice_node = model_info.LatticeNode( + input_nodes=input_nodes, weights=weights) + submodel_output_nodes[submodel_idx] = lattice_node + nodes.append(lattice_node) + + ################### + # Create mean node. + ################### + + # Mean node is only added for ensemble models. + if len(submodel_output_nodes) > 1: + input_nodes = [ + submodel_output_nodes[idx] + for idx in sorted(submodel_output_nodes.keys(), key=int) + ] + average_node = model_info.MeanNode(input_nodes=input_nodes) + nodes.append(average_node) + model_output_node = average_node + else: + model_output_node = list(submodel_output_nodes.values())[0] + + ##################################### + # Create output PWL calibration node. + ##################################### + + # Lengths (deltas between keypoints). + # {OUTPUT_CALIB_LAYER_NAME}/{LENGTHS_NAME} + lengths_op = '{}/{}'.format( + OUTPUT_CALIB_LAYER_NAME, + pwl_calibration_layer.LENGTHS_NAME, + ) + if lengths_op in ops: + # Interpolation keypoints do not include the last input keypoint. + # {OUTPUT_CALIB_LAYER_NAME}/{INTERPOLATION_KEYPOINTS_NAME} + keypoints_op = '{}/{}'.format( + OUTPUT_CALIB_LAYER_NAME, + pwl_calibration_layer.INTERPOLATION_KEYPOINTS_NAME, + ) + + # Output keypoints. We need to call the variable read op. + # {OUTPUT_CALIB_LAYER_NAME}/{PWL_CALIBRATION_KERNEL_NAME} + kernel_op = '{}/{}/Read/ReadVariableOp'.format( + OUTPUT_CALIB_LAYER_NAME, + pwl_calibration_layer.PWL_CALIBRATION_KERNEL_NAME, + ) + + (lengths, keypoints, kernel) = sess.run( + (g.get_operation_by_name(lengths_op).outputs[0], + g.get_operation_by_name(keypoints_op).outputs[0], + g.get_operation_by_name(kernel_op).outputs[0])) + output_keypoints = np.cumsum(kernel.flatten()) + + # Add the last keypoint to the keypoint list.
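+      # (The stored interpolation keypoints omit the final input keypoint;
+      # it is recovered as the last stored keypoint plus the last length.)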
+ input_keypoints = np.append(keypoints, keypoints[-1] + lengths[-1]) + + output_calibration_node = model_info.PWLCalibrationNode( + input_node=model_output_node, + input_keypoints=input_keypoints, + output_keypoints=output_keypoints, + default_input=None, + default_output=None) + nodes.append(output_calibration_node) + model_output_node = output_calibration_node + + return model_info.ModelGraph(nodes=nodes, output_node=model_output_node) diff --git a/tensorflow_lattice/python/estimators/BUILD b/tensorflow_lattice/python/estimators/BUILD deleted file mode 100644 index bef95fb..0000000 --- a/tensorflow_lattice/python/estimators/BUILD +++ /dev/null @@ -1,227 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -licenses(["notice"]) # Apache 2.0 License - -package( - default_visibility = [ - "//tensorflow_lattice:__subpackages__", - ], -) - -exports_files(["LICENSE"]) - -# All python tests can run under python 2 and 3. - -# estimators. -py_library( - name = "hparams", - srcs = ["hparams.py"], - srcs_version = "PY2AND3", - deps = [ - "@six_archive//:six", - "//tensorflow_lattice/python:regularizers", - ], -) - -py_test( - name = "hparams_test", - size = "small", - srcs = ["hparams_test.py"], - srcs_version = "PY2AND3", - deps = [ - ":hparams", - "@org_tensorflow//third_party/py/numpy", - "@org_tensorflow//tensorflow:tensorflow_py", - ], -) - -py_library( - name = "base", - srcs = ["base.py"], - srcs_version = "PY2AND3", - deps = [ - "@org_tensorflow//tensorflow:tensorflow_py", - "//tensorflow_lattice/python:tools", - ], -) - -py_test( - name = "base_test", - size = "medium", - srcs = ["base_test.py"], - srcs_version = "PY2AND3", - deps = [ - ":base", - "@org_tensorflow//tensorflow:tensorflow_py", - "//tensorflow_lattice/python:test_data", - ], -) - -py_library( - name = "calibrated", - srcs = ["calibrated.py"], - srcs_version = "PY2AND3", - deps = [ - ":base", - ":hparams", - "@six_archive//:six", - "@org_tensorflow//tensorflow:tensorflow_py", - "//tensorflow_lattice/python:keypoints_initialization", - "//tensorflow_lattice/python:pwl_calibration_layers", - "//tensorflow_lattice/python:regularizers", - "//tensorflow_lattice/python:tools", - ], -) - -py_test( - name = "calibrated_test", - size = "small", - srcs = ["calibrated_test.py"], - srcs_version = "PY2AND3", - deps = [ - ":calibrated", - "@org_tensorflow//tensorflow:tensorflow_py", - ], -) - -py_library( - name = "calibrated_linear", - srcs = ["calibrated_linear.py"], - srcs_version = "PY2AND3", - deps = [ - ":calibrated", - ":hparams", - "@org_tensorflow//tensorflow:tensorflow_py", - ], -) - -py_test( - name = "calibrated_linear_test", - size = "large", - srcs = ["calibrated_linear_test.py"], - srcs_version = "PY2AND3", - deps = [ - ":calibrated_linear", - "@org_tensorflow//tensorflow:tensorflow_py", - "//tensorflow_lattice/python:test_data", - ], -) - -py_library( - name = "calibrated_lattice", - srcs = 
["calibrated_lattice.py"], - srcs_version = "PY2AND3", - deps = [ - ":calibrated", - ":hparams", - "@org_tensorflow//tensorflow:tensorflow_py", - "//tensorflow_lattice/python:lattice_layers", - "//tensorflow_lattice/python:regularizers", - ], -) - -py_test( - name = "calibrated_lattice_test", - size = "large", - srcs = ["calibrated_lattice_test.py"], - srcs_version = "PY2AND3", - deps = [ - ":calibrated_lattice", - "@org_tensorflow//tensorflow:tensorflow_py", - "//tensorflow_lattice/python:test_data", - ], -) - -py_library( - name = "calibrated_rtl", - srcs = ["calibrated_rtl.py"], - srcs_version = "PY2AND3", - deps = [ - ":calibrated", - ":hparams", - "@six_archive//:six", - "@org_tensorflow//tensorflow:tensorflow_py", - "//tensorflow_lattice/python:lattice_layers", - "//tensorflow_lattice/python:regularizers", - ], -) - -py_test( - name = "calibrated_rtl_test", - size = "large", - srcs = ["calibrated_rtl_test.py"], - shard_count = 4, - srcs_version = "PY2AND3", - deps = [ - ":calibrated_rtl", - "@org_tensorflow//tensorflow:tensorflow_py", - "//tensorflow_lattice/python:test_data", - ], -) - -py_library( - name = "calibrated_etl", - srcs = ["calibrated_etl.py"], - srcs_version = "PY2AND3", - deps = [ - ":calibrated", - ":hparams", - "@org_tensorflow//tensorflow:tensorflow_py", - "//tensorflow_lattice/python:lattice_layers", - "//tensorflow_lattice/python:monotone_linear_layers", - "//tensorflow_lattice/python:pwl_calibration_layers", - "//tensorflow_lattice/python:regularizers", - ], -) - -py_test( - name = "calibrated_etl_test", - size = "large", - srcs = ["calibrated_etl_test.py"], - shard_count = 4, - srcs_version = "PY2AND3", - deps = [ - ":calibrated_etl", - "@org_tensorflow//tensorflow:tensorflow_py", - "//tensorflow_lattice/python:test_data", - ], -) - -py_library( - name = "separately_calibrated_rtl", - srcs = ["separately_calibrated_rtl.py"], - srcs_version = "PY2AND3", - deps = [ - ":calibrated", - ":hparams", - "@six_archive//:six", - "@org_tensorflow//tensorflow:tensorflow_py", - "//tensorflow_lattice/python:lattice_layers", - "//tensorflow_lattice/python:regularizers", - ], -) - -py_test( - name = "separately_calibrated_rtl_test", - size = "large", - srcs = ["separately_calibrated_rtl_test.py"], - shard_count = 9, - srcs_version = "PY2AND3", - deps = [ - ":separately_calibrated_rtl", - "@org_tensorflow//tensorflow:tensorflow_py", - "//tensorflow_lattice/python:test_data", - ], -) diff --git a/tensorflow_lattice/python/estimators/base.py b/tensorflow_lattice/python/estimators/base.py deleted file mode 100644 index 75664a5..0000000 --- a/tensorflow_lattice/python/estimators/base.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Base class for generic estimators that handles boilerplate code.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import abc -# Dependency imports -import tensorflow as tf - -from tensorflow_lattice.python.lib import tools -from tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import - -# Scope for variable names. -_SCOPE_TENSORFLOW_LATTICE_PREFIX = "tfl_" -_TRAIN_OP_NAME = "tfl_train_op" - - -class _ProjectionHook(tf.estimator.SessionRunHook): - """SessionRunHook to project to feasible space after each step.""" - - def __init__(self): - self._projection_ops = [] - - def set_projection_ops(self, projection_ops): - """Needs to be called in model_fn function, with ops to project.""" - self._projection_ops = projection_ops - - def after_run(self, run_context, run_values): - if self._projection_ops is not None: - run_context.session.run(self._projection_ops) - - def end(self, session): - if self._projection_ops is not None: - session.run(self._projection_ops) - - -class Base(tf.estimator.Estimator): - """Base class for generic models. - - It provides minimal preprocessing of the input features, sets up the hook that - runs projections at each step (typically used to project parameters to be - monotone and within bounds), and adds the appropriate head to the model. - - To extend one has to implement the method prediction_builder() - """ - __metaclass__ = abc.ABCMeta - - def __init__(self, - n_classes, - feature_columns=None, - model_dir=None, - optimizer=None, - config=None, - hparams=None, - head=None, - weight_column=None, - dtype=tf.float32, - name="model"): - """Construct Classifier/Regressor. - - Args: - n_classes: Number of classes, set to 0 if used for regression. If head is - not provided, only n_classes = 0 or 2 are currently supported. - feature_columns: Optional, if not set the model will use all features - returned by input_fn. An iterable containing all the feature columns - used by the model. All items in the set should be instances of classes - derived from `FeatureColumn` and are used to transform the input columns - into a numeric format that is fed into the rest of the graph. - model_dir: Directory to save model parameters, graph and etc. This can - also be used to load checkpoints from the directory into a estimator to - continue training a previously saved model. - optimizer: `Optimizer` object, or callable (with no inputs) that returns - an `Optimizer` object, defines the optimizer to use for training. This - is typically one of the optimizers defined in tf.train. - config: RunConfig object to configure the runtime settings. Typically set - to learn_runner.EstimatorConfig(). - hparams: Hyper parameter object to be passed to prediction builder. - head: a `TensorFlow Estimator Head` which specifies how the loss function, - final predictions, and so on are generated from model outputs. Defaults - to using a sigmoid cross entropy head for binary classification and mean - squared error head for regression. - weight_column: A string or a `tf.feature_column.numeric_column` defining - feature column representing weights. It is used to down weight or boost - examples during training. It will be multiplied by the loss of the - example. - dtype: The internal type to be used for tensors. - name: Name to be used as suffix to top-level variable scope for model. 
- - Raises: - ValueError: invalid parameters. - KeyError: type of feature not supported. - """ - # We sort the list of feature_columns here, since we will later create - # the ops that implement their represented transformations (e.g. embedding) - # in the order in which they are listed in self._feature_columns. - # The constructed ops are then given names by the tensorflow framework - # that depend on their creation order (for example, if two ops have the - # same type they will be suffixed by an ordinal reflecting the creation - # order). As this code must be deterministic (since it could be - # executed in a multi-machine tensorflow cluster), we must have the order - # of feature columns deterministic as well (which would not be the case if - # it's, for example, the result of calling keys() on a dictionary); thus - # we sort the feature columns here by their names. - self._feature_columns = (None if feature_columns is None else - tools.get_sorted_feature_columns(feature_columns)) - self._weight_column = weight_column - self._optimizer = optimizer - self._config = config - self._hparams = hparams - self._name = _SCOPE_TENSORFLOW_LATTICE_PREFIX + name - self._n_classes = n_classes - self._dtype = dtype - - if head is not None: - self._head = head - else: - if n_classes == 0: - self._head = ( - tf.contrib.estimator.regression_head( - label_dimension=1, - weight_column=self._weight_column, - loss_reduction=tf.compat.v1.losses.Reduction.SUM)) - elif n_classes == 2: - self._head = ( - tf.contrib.estimator.binary_classification_head( - weight_column=self._weight_column, - loss_reduction=tf.compat.v1.losses.Reduction.SUM)) - else: - raise ValueError("Invalid value for n_classes=%d" % n_classes) - - super(Base, self).__init__( - model_fn=self._base_model_fn, model_dir=model_dir, config=config) - - # Make sure model directory exists after initialization. - # Notice self.model_dir is set by Estimator class. - file_io.recursive_create_dir(self.model_dir) - - self._projection_hook = _ProjectionHook() - - @abc.abstractmethod - def prediction_builder(self, columns_to_tensors, mode, hparams, dtype): - """Method to be specialized that builds the prediction graph. - - Args: - columns_to_tensors: A map from feature_name to raw features tensors, each - with shape `[batch_size]` or `[batch_size, feature_dim]`. - mode: Estimator's `ModeKeys`. - hparams: hyperparameters object passed to prediction builder. This is not - used by the Base estimator itself and is passed without checks or any - processing and can be of any type. - dtype: The dtype to be used for tensors. - - Returns: - A tuple of (prediction_tensor, projection_ops, regularization_loss) of - type (tf.Tensor, list[], tf.Tensor): - prediction_tensor: shaped `[batch_size/?,1]` for regression or binary - classification, or `[batch_size, n_classes]` for multi-class - classifiers. For classifier this will be the logit(s) value(s). - projection_ops: list of projection ops to be applied after each batch, - or None. - regularization_loss: loss related to regularization or None. - """ - raise NotImplementedError( - "This method must be implemented in a child class") - - - def _base_model_fn(self, features, labels, mode, config): # pylint: disable=unused-argument - """Creates the prediction, loss, and train ops. - - Args: - features: A dictionary of tensors keyed by the feature name. - labels: A tensor representing the label. - mode: The execution mode, as defined in tf.estimator.ModeKeys. - config: Optional configuration object. 
Will receive what is passed to - Estimator in `config` parameter, or the default `config`. Allows - updating things in your model_fn based on configuration such as - `num_ps_replicas`. - - Returns: - ModelFnOps, with the predictions, loss, and train_op. - - Raises: - ValueError: if incompatible parameters are given. - """ - with tf.compat.v1.variable_scope(self._name): - if self._feature_columns is None: - columns_to_tensors = features.copy() - else: - with tf.compat.v1.variable_scope("feature_column_transformation"): - columns_to_tensors = { - feature_column.name: - tools.input_from_feature_column(features.copy(), feature_column, - self._dtype) - for feature_column in self._feature_columns - } - (prediction, projection_ops, - regularization) = self.prediction_builder(columns_to_tensors, mode, - self._hparams, self._dtype) - - def _train_op_fn(loss): - """Returns train_op tensor if TRAIN mode, or None.""" - train_op = None - if mode == tf.estimator.ModeKeys.TRAIN: - if regularization is not None: - loss += regularization - tf.compat.v1.summary.scalar("loss_with_regularization", loss) - optimizer = self._optimizer - if optimizer is None: - optimizer = tf.compat.v1.train.AdamOptimizer - if callable(optimizer): - optimizer = optimizer() - train_op = optimizer.minimize( - loss, - global_step=tf.compat.v1.train.get_global_step(), - name=_TRAIN_OP_NAME) - self._projection_hook.set_projection_ops(projection_ops) - return train_op - - # Use head to generate model_fn outputs. - estimator_spec = self._head.create_estimator_spec( - features=features, - labels=labels, - mode=mode, - train_op_fn=_train_op_fn, - logits=prediction) - - # Update chief worker's training session run hooks to include - # projection_hook. This means that in a distributed setting, only the - # chief worker will run the projection op after its own update and without - # synchronization with other workers. Thus, the parameters may temporary - # leave the feasible space. - if mode == tf.estimator.ModeKeys.TRAIN: - updated_training_chief_hooks = ( - estimator_spec.training_chief_hooks + (self._projection_hook,)) - estimator_spec = estimator_spec._replace( - training_chief_hooks=updated_training_chief_hooks) - - return estimator_spec diff --git a/tensorflow_lattice/python/estimators/base_test.py b/tensorflow_lattice/python/estimators/base_test.py deleted file mode 100644 index 527d12c..0000000 --- a/tensorflow_lattice/python/estimators/base_test.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Base estimator is tested with a simple linear model implementation.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import six -import tensorflow as tf - -from tensorflow_lattice.python.estimators import base -from tensorflow_lattice.python.lib import test_data - - -class _BaseLinear(base.Base): - """Base class for BaseLinearClassifier and BaseLinearRegressor.""" - - def __init__(self, - n_classes, - feature_columns=None, - model_dir=None, - hparams=None): - """Construct LinearClassifier/Regressor.""" - super(_BaseLinear, self).__init__( - n_classes=n_classes, - feature_columns=feature_columns, - model_dir=model_dir, - hparams=hparams, - name='linear') - - def prediction_builder(self, columns_to_tensors, mode, hparams, dtype): - unstacked_inputs = [] - for tensor in six.itervalues(columns_to_tensors): - if tensor.shape.ndims == 1: - unstacked_inputs.append(tensor) - elif tensor.shape.ndims == 2: - unstacked_inputs.extend(tf.unstack(tensor, axis=1)) - input_tensor = tf.stack(unstacked_inputs, axis=1, name='stack') - weights = tf.compat.v1.get_variable( - 'weights', - initializer=tf.zeros(shape=[len(unstacked_inputs), 1], dtype=dtype)) - prediction = tf.reshape( - tf.tensordot(input_tensor, weights, axes=1, name='tensordot'), [-1, 1]) - # Add ridge regularizer. - regularization = tf.reduce_sum(tf.square(weights)) - # Add a projection that forces the weight vector to be 0. - projeciton_ops = [weights.assign_sub(weights)] - return prediction, projeciton_ops, regularization - - -class BaseTest(tf.test.TestCase): - - def setUp(self): - super(BaseTest, self).setUp() - self._test_data = test_data.TestData() - - def _TestRegressor(self, feature_columns, input_fn): - estimator = _BaseLinear(n_classes=0, feature_columns=feature_columns) - estimator.train(input_fn=input_fn) - preds = [p['predictions'][0] for p in estimator.predict(input_fn=input_fn)] - self.assertAllClose(preds, [0.0] * len(preds), 1e-7) - - def _TestCalssifier(self, feature_columns, input_fn): - estimator = _BaseLinear(n_classes=2, feature_columns=feature_columns) - estimator.train(input_fn=input_fn) - preds = [p['logits'][0] for p in estimator.predict(input_fn=input_fn)] - self.assertAllClose(preds, [0.0] * len(preds), 1e-7) - - def testBaseLinearRegressorTraining1D(self): - feature_columns = [ - tf.feature_column.numeric_column('x'), - ] - self._TestRegressor(feature_columns, self._test_data.oned_input_fn()) - - def testBaseLinearRegressorTraining3D(self): - # Tests also a categorical feature with vocabulary list. 
- feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - tf.feature_column.categorical_column_with_vocabulary_list( - 'x2', ['Y', 'N']) - ] - self._TestRegressor(feature_columns, - self._test_data.threed_input_fn(False, 1)) - - def testBaseLinearRegressorTrainingMultiDimensionalFeature(self): - feature_columns = [ - tf.feature_column.numeric_column('x', shape=(2,)), - ] - self._TestRegressor(feature_columns, - self._test_data.multid_feature_input_fn()) - - def testBaseLinearClassifierTraining(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - self._TestCalssifier(feature_columns, - self._test_data.twod_classificer_input_fn()) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_lattice/python/estimators/calibrated.py b/tensorflow_lattice/python/estimators/calibrated.py deleted file mode 100644 index 5c1df76..0000000 --- a/tensorflow_lattice/python/estimators/calibrated.py +++ /dev/null @@ -1,515 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Base class for TensorFlow Lattice estimators with input calibration.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import abc -import six -import tensorflow as tf - -from tensorflow_lattice.python.estimators import base -from tensorflow_lattice.python.estimators import hparams as tf_lattice_hparams -from tensorflow_lattice.python.lib import keypoints_initialization -from tensorflow_lattice.python.lib import pwl_calibration_layers -from tensorflow_lattice.python.lib import regularizers -from tensorflow_lattice.python.lib import tools - -# Scope for variable names. 
-_SCOPE_CALIBRATED_PREFIX = "calibrated_" -_SCOPE_INPUT_CALIBRATION = "input_calibration" - - -def _get_feature_dict(features): - if isinstance(features, dict): - return features - return {"": features} - - -def _get_optimizer(optimizer, hparams): - """Materializes the optimizer into a tf.train optimizer object.""" - if optimizer is None: - optimizer = tf.compat.v1.train.AdamOptimizer - if callable(optimizer): - learning_rate = hparams.get_param("learning_rate") - if learning_rate is None: - return optimizer() - else: - return optimizer(learning_rate=learning_rate) - else: - return optimizer - - -def _get_per_feature_dict(hparams, param_name, default_value=None): - """Creates dict with values returned by hparams for param for each feature.""" - if not issubclass(type(hparams), tf_lattice_hparams.PerFeatureHParams): - raise ValueError( - "hparams passed to Estimator is not a subclass of " - "tensorflow_lattice.PerFeatureHParams, it can't figure out parameters " - "for calibration") - return { - feature_name: hparams.get_feature_param(feature_name, param_name, - default_value) - for feature_name in hparams.get_feature_names() - } - - -def _call_keypoints_inializers_fn(keypoints_initializers_fn): - """Call the closure and check/return results.""" - if callable(keypoints_initializers_fn): - kp_init = keypoints_initializers_fn() - if (len(kp_init) != 2 or not issubclass(type(kp_init[0]), tf.Tensor) or - not issubclass(type(kp_init[1]), tf.Tensor)): - raise ValueError( - "invalid value returned by keypoints_initializers_fn, expected a " - "pair of tensors, got %s" % kp_init) - return kp_init - elif isinstance(keypoints_initializers_fn, dict): - return { - k: _call_keypoints_inializers_fn(v) - for k, v in six.iteritems(keypoints_initializers_fn) - } - else: - raise ValueError("Unknown type for keypoints_initializers_fn: %s" % - type(keypoints_initializers_fn)) - - -def _update_keypoints(feature_name, asked_keypoints, kp_init_keypoints): - """Updates num_keypoints according to availability.""" - if not asked_keypoints or kp_init_keypoints == asked_keypoints: - # Meet asked_keypoints if no calibration was asked for this feature, - # or if the correct number of kp_init_keypoints are available. - return asked_keypoints - if kp_init_keypoints < asked_keypoints: - # If fewer keypoints were returned by init functions, emit debug - # message and return those available. - tf.compat.v1.logging.debug( - "Using {} keypoints for calibration of {} instead of " - "the requested {}".format(kp_init_keypoints, feature_name, - asked_keypoints)) - return kp_init_keypoints - raise ValueError("Calibration initialization returned more keypoints ({}) " - "than requested ({}) for feature {}".format( - kp_init_keypoints, asked_keypoints, feature_name)) - - -def input_calibration_layer_from_hparams(columns_to_tensors, - hparams, - quantiles_dir=None, - keypoints_initializers=None, - name=None, - dtype=tf.float32): - """Creates a calibration layer for the input using hyper-parameters. - - Similar to `input_calibration_layer` but reads its parameters from a - `CalibratedHParams` object. - - Args: - columns_to_tensors: A mapping from feature name to tensors. - hparams: Hyper-parameters, need to inherit from `CalibratedHParams`. See - `CalibratedHParams` and `input_calibration_layer` for descriptions of how - these hyper-parameters work. - quantiles_dir: location where quantiles for the data was saved. Typically - the same directory as the training data. 
These quantiles can be generated - with `pwl_calibration_layers.calculate_quantiles_for_keypoints`, maybe in - a separate invocation of your program. Different models that share the - same quantiles information -- so this needs to be generated only once when - hyper-parameter tuning. If you don't want to use quantiles, you can set - `keypoints_initializers` instead. - keypoints_initializers: if you know the distribution of your input features - you can provide that directly instead of `quantiles_dir`. See - `pwl_calibrators_layers.uniform_keypoints_for_signal`. It must be a pair - of tensors with keypoints inputs and outputs to use for initialization - (must match `num_keypoints` configured in `hparams`). Alternatively can be - given as a dict mapping feature name to pairs, for initialization per - feature. If `quantiles_dir` and `keypoints_initializer` are set, the - latter takes precendence, and the features for which - `keypoints_initializers` are not defined fallback to using the quantiles - found in `quantiles_dir`. - name: Name scope for layer. - dtype: If any of the scalars are not given as tensors, they are converted to - tensors with this dtype. - - Returns: - A tuple of: - * calibrated tensor of shape [batch_size, sum(features dimensions)]. - * list of the feature names in the order they appear in the calibrated - tensor. A name may appear more than once if the feature is - multi-dimension (for instance a multi-dimension embedding) - * list of projection ops, that must be applied at each step (or every so - many steps) to project the model to a feasible space: used for bounding - the outputs or for imposing monotonicity. Empty if none are requested. - * tensor with regularization loss, or None for no regularization. - - Raises: - ValueError: if dtypes are incompatible. - - - """ - with tf.name_scope(name or "input_calibration_layer_from_hparams"): - - # Sort out list of feature names. - unique_feature_names = tools.get_sorted_feature_names( - columns_to_tensors=columns_to_tensors) - - # Get per-feature parameters. - num_keypoints = _get_per_feature_dict(hparams, "num_keypoints") - calibration_output_min = _get_per_feature_dict(hparams, - "calibration_output_min") - calibration_output_max = _get_per_feature_dict(hparams, - "calibration_output_max") - calibration_bound = _get_per_feature_dict(hparams, "calibration_bound") - monotonicity = _get_per_feature_dict(hparams, "monotonicity") - missing_input_values = _get_per_feature_dict(hparams, "missing_input_value") - missing_output_values = _get_per_feature_dict(hparams, - "missing_output_value") - - # Convert keypoints_initializers to a dict if needed, or otherwise make a - # copy of the original keypoints_initializers dict. - if keypoints_initializers is None: - keypoints_initializers = {} - elif not isinstance(keypoints_initializers, dict): - keypoints_initializers = { - name: keypoints_initializers for name in unique_feature_names - } - else: - keypoints_initializers = keypoints_initializers.copy() - - # If quantiles_dir is given, add any missing keypoint initializers with - # keypoints based on quantiles. - if quantiles_dir is not None: - quantiles_feature_names = [ - name for name in unique_feature_names - if name not in keypoints_initializers - ] - - # Reverse initial output keypoints for decreasing monotonic features. - reversed_dict = { - feature_name: (monotonicity[feature_name] == -1) - for feature_name in quantiles_feature_names - } - - # Read initializers from quantiles_dir, for those not already - # defined. 
- # - # Notice that output_min and output_max won't matter much if - # they are not bounded, since they will be adjusted during training. - quantiles_init = keypoints_initialization.load_keypoints_from_quantiles( - feature_names=quantiles_feature_names, - save_dir=quantiles_dir, - num_keypoints=num_keypoints, - output_min=calibration_output_min, - output_max=calibration_output_max, - reversed_dict=reversed_dict, - missing_input_values_dict=missing_input_values, - dtype=dtype) - - # Merge with explicit initializers. - keypoints_initializers.update(quantiles_init) - - # Update num_keypoints according to keypoints actually used by the - # initialization functions: some initialization functions may change - # them, for instance if there are not enough unique values. - for (feature_name, initializers) in six.iteritems(keypoints_initializers): - kp_init_keypoints = initializers[0].shape.as_list()[0] - num_keypoints[feature_name] = _update_keypoints( - feature_name, num_keypoints[feature_name], kp_init_keypoints) - - # Setup the regularization. - regularizer_amounts = {} - for regularizer_name in regularizers.CALIBRATOR_REGULARIZERS: - regularizer_amounts[regularizer_name] = _get_per_feature_dict( - hparams, "calibration_{}".format(regularizer_name)) - - return pwl_calibration_layers.input_calibration_layer( - columns_to_tensors=columns_to_tensors, - num_keypoints=num_keypoints, - keypoints_initializers=keypoints_initializers, - bound=calibration_bound, - monotonic=monotonicity, - missing_input_values=missing_input_values, - missing_output_values=missing_output_values, - **regularizer_amounts) - - -class _ProjectionHook(tf.estimator.SessionRunHook): - """SessionRunHook to project to feasible space after each step.""" - - def __init__(self): - self._projection_ops = [] - - def set_projection_ops(self, projection_ops): - """Needs to be called in model_fn function, with ops to project.""" - self._projection_ops = projection_ops - - def after_run(self, run_context, run_values): - if self._projection_ops is not None: - run_context.session.run(self._projection_ops) - - -class Calibrated(base.Base): - """Base class for TensorFlow calibrated models. - - It provides preprocessing and calibration of the input features, and - sets up the hook that runs projections at each step -- typically used - to project parameters to be monotone and within bounds. - - To extend one has to implement the method prediction_builder() - """ - __metaclass__ = abc.ABCMeta - - def __init__(self, - n_classes, - feature_columns=None, - model_dir=None, - quantiles_dir=None, - keypoints_initializers_fn=None, - optimizer=None, - config=None, - hparams=None, - head=None, - weight_column=None, - name="model"): - """Construct CalibrateLinearClassifier/Regressor. - - Args: - n_classes: Number of classes, set to 0 if used for regression. - feature_columns: Optional, if not set the model will use all features - returned by input_fn. An iterable containing all the feature columns - used by the model. All items in the set should be instances of classes - derived from `FeatureColumn`. - model_dir: Directory to save model parameters, graph and etc. This can - also be used to load checkpoints from the directory into a estimator to - continue training a previously saved model. - quantiles_dir: location where quantiles for the data was saved. Typically - the same directory as the training data. 
These quantiles can be - generated only once with - `pwl_calibration_layers.calculate_quantiles_for_keypoints` in a separate - invocation of your program. If you don't want to use quantiles, you can - set `keypoints_initializer` instead. - keypoints_initializers_fn: if you know the distribution of your input - features you can provide that directly instead of `quantiles_dir`. See - `pwl_calibrators_layers.uniform_keypoints_for_signal`. It must be a - closure that returns a pair of tensors with keypoints inputs and outputs - to use for initialization (must match `num_keypoints` configured in - `hparams`). Alternatively the closure can return a dict mapping feature - name to pairs for initialization per feature. If `quantiles_dir` and - `keypoints_initializers_fn` are set, the later takes precendence, and - the features for which `keypoints_initializers` are not defined fallback - to using the quantiles found in `quantiles_dir`. It uses a closure - instead of the tensors themselves because the graph has to be created at - the time the model is being build, which happens at a later time. - optimizer: `Optimizer` object, or callable that defines the optimizer to - use for training -- if a callable, it will be called with - learning_rate=hparams.learning_rate if provided. - config: RunConfig object to configure the runtime settings. Typically set - to learn_runner.EstimatorConfig(). - hparams: an instance of tf_lattice_hparams.CalibrationHParams. If set to - None default parameters are used. - head: a `TensorFlow Estimator Head` which specifies how the loss function, - final predictions, and so on are generated from model outputs. Defaults - to using a sigmoid cross entropy head for binary classification and mean - squared error head for regression. - weight_column: A string or a `tf.feature_column.numeric_column` defining - feature column representing weights. It is used to down weight or boost - examples during training. It will be multiplied by the loss of the - example. - name: Name to be used as suffix to top-level variable scope for model. - - Raises: - ValueError: invalid parameters. - KeyError: type of feature not supported. - """ - super(Calibrated, self).__init__( - n_classes=n_classes, - feature_columns=feature_columns, - model_dir=model_dir, - optimizer=_get_optimizer(optimizer, hparams), - config=config, - hparams=hparams, - head=head, - weight_column=weight_column, - dtype=tf.float32, - name=_SCOPE_CALIBRATED_PREFIX + name) - - self._quantiles_dir = quantiles_dir - self._keypoints_initializers_fn = keypoints_initializers_fn - - if self._hparams is None: - raise ValueError("hparams cannot be none") - if not issubclass( - type(self._hparams), tf_lattice_hparams.CalibratedHParams): - raise ValueError("hparams is not an instance of hparams.CalibratedHParams" - ", got type(params)=%s" % type(self._hparams)) - - @abc.abstractmethod - def calibration_structure_builder(self, columns_to_tensors, hparams): - """Method to be specialized that builds the calibration structure. - - Derived classes should override this method to return the set of features - used in each separately calibrated submodel, or return None to indicate - all features should be calibrated only once. - - Args: - columns_to_tensors: A mapping from feature name to tensors. - hparams: hyperparameters passed to object constructor. 
- - Returns: - calibration_structure: list of sub_columns_to_tensors corresponding to the - features used in each sub-model, or None to indicate that this is a - single model structure that uses all features. Each element is a dict - from feature name to tensors in the same format as the input - columns_to_tensors. - """ - raise NotImplementedError( - "This method must be implemented in a child class") - - - @abc.abstractmethod - def prediction_builder_from_calibrated(self, mode, - per_dimension_feature_names, hparams, - calibrated): - """Method to be specialized that builds the prediction graph. - - Args: - mode: Estimator's `ModeKeys`. - per_dimension_feature_names: Name of features. The ordering should be - matched with the ordering in calibrated feature tensor. Notice - feature_names may be repeated, if some of the features were originally - multi-dimensional. - hparams: hyperparameters passed to object constructor. - calibrated: calibrated feature tensor, shaped `[batch_size, num_features]` - - Returns: - A tuple of (prediction_tensor, oprojection_ops, regularization_loss) of - type (tf.Tensor, list[], tf.Tensor): - prediction_tensor: shaped `[batch_size/?,1]` for regression or binary - classification, or `[batch_size, n_classes]` for multi-class - classifiers. For classifier this will be the logit(s) value(s). - projection_ops: list of projection ops to be applied after each batch, - or None. - regularization_loss: loss related to regularization or None. - """ - raise NotImplementedError( - "This method must be implemented in a child class") - - def prediction_builder(self, columns_to_tensors, mode, hparams, dtype): - """Method that builds the prediction graph. - - Args: - columns_to_tensors: A map from feature_name to raw features tensors, each - with shape `[batch_size]` or `[batch_size, feature_dim]`. - mode: Estimator's `ModeKeys`. - hparams: hyperparameters object passed to prediction builder. This is not - used by the Base estimator itself and is passed without checks or any - processing and can be of any type. - dtype: The dtype to be used for tensors. - - Returns: - A tuple of (prediction_tensor, oprojection_ops, regularization_loss) of - type (tf.Tensor, list[], tf.Tensor): - prediction_tensor: shaped `[batch_size/?,1]` for regression or binary - classification, or `[batch_size, n_classes]` for multi-class - classifiers. For classifier this will be the logit(s) value(s). - projection_ops: list of projection ops to be applied after each batch, - or None. - regularization_loss: loss related to regularization or None. - Raises: - ValueError: invalid parameters. - """ - if (mode == tf.estimator.ModeKeys.TRAIN and self._quantiles_dir is None and - self._keypoints_initializers_fn is None): - raise ValueError( - "At least one of quantiles_dir or keypoints_initializers_fn " - "must be given for training") - - # If keypoint_initializer closures were given, call them to create the - # initializers tensors. - kp_init_explicit = None - if self._keypoints_initializers_fn is not None: - kp_init_explicit = _call_keypoints_inializers_fn( - self._keypoints_initializers_fn) - - # Add feature names to hparams so that builders can make use of them. - for feature_name in columns_to_tensors: - self._hparams.add_feature(feature_name) - - total_projection_ops = None - total_regularization = None - total_prediction = None - - # Get the ensemble structure. 
- calibration_structure = self.calibration_structure_builder( - columns_to_tensors, self._hparams) - - if calibration_structure is None: - # Single model or shared calibration. - (calibrated, per_dimension_feature_names, calibration_projections, - calibration_regularization) = ( - input_calibration_layer_from_hparams( - columns_to_tensors=columns_to_tensors, - hparams=self._hparams, - quantiles_dir=self._quantiles_dir, - keypoints_initializers=kp_init_explicit, - name=_SCOPE_INPUT_CALIBRATION, - dtype=self._dtype)) - (total_prediction, prediction_projections, - prediction_regularization) = self.prediction_builder_from_calibrated( - mode, per_dimension_feature_names, self._hparams, calibrated) - total_projection_ops = tools.add_if_not_none(calibration_projections, - prediction_projections) - total_regularization = tools.add_if_not_none(calibration_regularization, - prediction_regularization) - else: - # Ensemble model with separate calibration. - predictions = [] - for (index, sub_columns_to_tensors) in enumerate(calibration_structure): - # Calibrate. - with tf.compat.v1.variable_scope("submodel_{}".format(index)): - (calibrated, per_dimension_feature_names, calibration_projections, - calibration_regularization) = ( - input_calibration_layer_from_hparams( - columns_to_tensors=sub_columns_to_tensors, - hparams=self._hparams, - quantiles_dir=self._quantiles_dir, - keypoints_initializers=kp_init_explicit, - name=_SCOPE_INPUT_CALIBRATION, - dtype=self._dtype)) - (prediction, prediction_projections, - prediction_regularization) = self.prediction_builder_from_calibrated( - mode, per_dimension_feature_names, self._hparams, calibrated) - projection_ops = tools.add_if_not_none(calibration_projections, - prediction_projections) - regularization = tools.add_if_not_none(calibration_regularization, - prediction_regularization) - - # Merge back the results. - total_projection_ops = tools.add_if_not_none(total_projection_ops, - projection_ops) - total_regularization = tools.add_if_not_none(total_regularization, - regularization) - predictions.append(prediction) - - # Final prediction is a mean of predictions, plus a bias term. - stacked_predictions = tf.stack( - predictions, axis=0, name="stacked_predictions") - ensemble_output = tf.reduce_mean(stacked_predictions, axis=0) - ensemble_bias_init = self._hparams.get_param("ensemble_bias") - bias = tf.Variable([ensemble_bias_init], name="ensemble_bias") - total_prediction = ensemble_output + bias - - return total_prediction, total_projection_ops, total_regularization diff --git a/tensorflow_lattice/python/estimators/calibrated_etl.py b/tensorflow_lattice/python/estimators/calibrated_etl.py deleted file mode 100644 index 480e08c..0000000 --- a/tensorflow_lattice/python/estimators/calibrated_etl.py +++ /dev/null @@ -1,685 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ==============================================================================
-"""CalibratedEtl canned estimators."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import copy
-
-import tensorflow as tf
-
-from tensorflow_lattice.python.estimators import calibrated as calibrated_lib
-from tensorflow_lattice.python.estimators import hparams as tfl_hparams
-from tensorflow_lattice.python.lib import keypoints_initialization
-from tensorflow_lattice.python.lib import lattice_layers
-from tensorflow_lattice.python.lib import monotone_linear_layers
-from tensorflow_lattice.python.lib import pwl_calibration_layers
-from tensorflow_lattice.python.lib import regularizers
-from tensorflow_lattice.python.lib import tools
-
-_EPSILON = 1e-7
-
-
-def _calibration_layer(input_tensor, input_dim, input_min, input_max,
-                       num_keypoints, output_min, output_max):
-  """Create an intermediate calibration layer."""
-  init_keypoints = keypoints_initialization.uniform_keypoints_for_signal(
-      num_keypoints=num_keypoints,
-      input_min=input_min,
-      input_max=input_max,
-      output_min=output_min,
-      output_max=output_max,
-      dtype=input_tensor.dtype)
-  packed_results = pwl_calibration_layers.calibration_layer(
-      input_tensor,
-      num_keypoints=num_keypoints,
-      keypoints_initializers=[init_keypoints] * input_dim,
-      bound=True,
-      monotonic=+1)
-  (calibrated_input_tensor, projection_ops, _) = packed_results
-  return (calibrated_input_tensor, projection_ops)
-
-
-def _ensemble_lattices_layer(
-    input_tensor, input_dim, output_dim, interpolation_type, calibration_min,
-    calibration_max, calibration_num_keypoints, num_lattices, lattice_rank,
-    lattice_size, regularizer_amounts, is_monotone):
-  """Creates an ensemble of lattices layer."""
-  projections = []
-  structures = [
-      range(lattice_cnt * lattice_rank, (lattice_cnt + 1) * lattice_rank)
-      for lattice_cnt in range(num_lattices)
-  ]
-  calibrated_input, proj = _calibration_layer(
-      input_tensor,
-      input_dim,
-      calibration_min,
-      calibration_max,
-      calibration_num_keypoints,
-      output_min=0,
-      output_max=lattice_size - 1)
-  if proj:
-    projections += proj
-  lattice_outputs, _, proj, reg = lattice_layers.ensemble_lattices_layer(
-      calibrated_input, [lattice_size] * input_dim,
-      structures,
-      is_monotone=is_monotone,
-      output_dim=output_dim,
-      interpolation_type=interpolation_type,
-      **regularizer_amounts)
-  if proj:
-    projections += proj
-  return lattice_outputs, projections, reg
-
-
-def _embedded_lattices(calibrated_input_tensor,
-                       input_dim,
-                       output_dim,
-                       interpolation_type,
-                       monotonic_num_lattices,
-                       monotonic_lattice_rank,
-                       monotonic_lattice_size,
-                       non_monotonic_num_lattices,
-                       non_monotonic_lattice_rank,
-                       non_monotonic_lattice_size,
-                       linear_embedding_calibration_min,
-                       linear_embedding_calibration_max,
-                       linear_embedding_calibration_num_keypoints,
-                       regularizer_amounts,
-                       is_monotone=None):
-  """Creates an ensemble of lattices with a linear embedding.
-
-  This function constructs the following deep lattice network:
-  calibrated_input -> linear_embedding -> calibration -> ensemble of lattices.
-  The outputs of the lattice ensemble are then averaged, and a bias term is
-  added to make the final prediction.
-
-  The ensemble of lattices consists of two parts: monotonic lattices and
-  non-monotonic lattices. The input to the monotonic lattices is an output of
-  linear_embedding that contains both monotonic and non-monotonic
-  calibrated_input.
All inputs to the monotonic lattices are set to be monotonic
-  to preserve end-to-end monotonicity in the monotonic feature.
-  The input to the non-monotonic lattices is an output of linear_embedding
-  that only contains non-monotonic calibrated_input. All inputs to the
-  non-monotonic lattices are set to be non-monotonic, since we do not need to
-  guarantee monotonicity.
-
-  Args:
-    calibrated_input_tensor: [batch_size, input_dim] tensor.
-    input_dim: (int) input dimension.
-    output_dim: (int) output dimension.
-    interpolation_type: defines whether the lattice will interpolate using the
-      full hypercube or only the simplex ("hyper-triangle") around the point
-      being evaluated. Valid values: 'hypercube' or 'simplex'.
-    monotonic_num_lattices: (int) number of monotonic lattices in the ensemble
-      lattices layer.
-    monotonic_lattice_rank: (int) number of inputs to each monotonic lattice
-      in the ensemble lattices layer.
-    monotonic_lattice_size: (int) lattice cell size for each monotonic lattice
-      in the ensemble lattices layer.
-    non_monotonic_num_lattices: (int) number of non-monotonic lattices in the
-      ensemble lattices layer.
-    non_monotonic_lattice_rank: (int) number of inputs to each non-monotonic
-      lattice in the ensemble lattices layer.
-    non_monotonic_lattice_size: (int) lattice cell size for each non-monotonic
-      lattice in the ensemble lattices layer.
-    linear_embedding_calibration_min: (float) the minimum input keypoint value
-      for linear_embedding calibration.
-    linear_embedding_calibration_max: (float) the maximum input keypoint value
-      for linear_embedding calibration.
-    linear_embedding_calibration_num_keypoints: (int) the number of keypoints
-      for linear_embedding calibration.
-    regularizer_amounts: Dict of regularization amounts passed as keyword args
-      to regularizers.lattice_regularization().
-    is_monotone: (bool or list of booleans) if is_monotone[k] is True, then
-      calibrated_input_tensor[:, k] is considered to be a monotonic input.
-
-  Returns:
-    A tuple of (output_tensor, projection_ops, regularization).
-
-  Raises:
-    ValueError: If there are no non-monotonic inputs but
-      non_monotonic_num_lattices is not zero.
-  """
-  projections = []
-  regularization = None
-
-  # Explicitly assign number of lattices to zero for any empty cases.
-  if not monotonic_num_lattices:
-    monotonic_num_lattices = 0
-  if not non_monotonic_num_lattices:
-    non_monotonic_num_lattices = 0
-
-  # Step 1. Create a linear embedding.
-  if monotonic_num_lattices:
-    monotonic_embedding_dim = monotonic_num_lattices * monotonic_lattice_rank
-  else:
-    monotonic_num_lattices = 0
-    monotonic_embedding_dim = 0
-  if non_monotonic_num_lattices:
-    non_monotonic_embedding_dim = (
-        non_monotonic_num_lattices * non_monotonic_lattice_rank)
-  else:
-    non_monotonic_num_lattices = 0
-    non_monotonic_embedding_dim = 0
-
-  if is_monotone is not None:
-    is_monotone = tools.cast_to_list(is_monotone, input_dim, 'is_monotone')
-  with tf.compat.v1.variable_scope('linear_embedding'):
-    packed_results = monotone_linear_layers.split_monotone_linear_layer(
-        calibrated_input_tensor,
-        input_dim,
-        monotonic_embedding_dim,
-        non_monotonic_embedding_dim,
-        is_monotone=is_monotone)
-    (monotonic_output, _, non_monotonic_output, _, proj, _) = packed_results
-    if proj is not None:
-      projections.append(proj)
-
-  # Step 2. Create ensemble of monotonic lattices.
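-  # Each ensemble below re-calibrates its slice of the linear embedding into
-  # the lattice domain [0, lattice_size - 1] before interpolation; the
-  # monotonic ensemble keeps every lattice input monotonic so the composed
-  # function stays monotonic end to end.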
- if monotonic_num_lattices == 0: - m_lattice_outputs = None - else: - with tf.compat.v1.variable_scope('monotonic_lattices'): - m_lattice_outputs, projs, reg = _ensemble_lattices_layer( - monotonic_output, - monotonic_embedding_dim, - output_dim, - interpolation_type, - linear_embedding_calibration_min, - linear_embedding_calibration_max, - linear_embedding_calibration_num_keypoints, - monotonic_num_lattices, - monotonic_lattice_rank, - monotonic_lattice_size, - regularizer_amounts, - is_monotone=True) - if projs: - projections += projs - regularization = tools.add_if_not_none(regularization, reg) - - # Step 3. Construct non-monotonic ensembles. - if non_monotonic_output is None and non_monotonic_num_lattices > 0: - raise ValueError( - 'All input signals are monotonic but the number of non monotonic ' - 'lattices is not zero.') - if non_monotonic_num_lattices == 0: - n_lattice_outputs = None - else: - with tf.compat.v1.variable_scope('non_monotonic_lattices'): - n_lattice_outputs, projs, reg = _ensemble_lattices_layer( - non_monotonic_output, - non_monotonic_embedding_dim, - output_dim, - interpolation_type, - linear_embedding_calibration_min, - linear_embedding_calibration_max, - linear_embedding_calibration_num_keypoints, - non_monotonic_num_lattices, - non_monotonic_lattice_rank, - non_monotonic_lattice_size, - regularizer_amounts, - is_monotone=False) - if projs: - projections += projs - regularization = tools.add_if_not_none(regularization, reg) - - # Step 4. Take average to make a final prediction. - with tf.compat.v1.variable_scope('ensemble_average'): - output = tf.compat.v1.get_variable( - name='ensemble_bias', - initializer=[0.0] * output_dim, - dtype=calibrated_input_tensor.dtype) - if m_lattice_outputs: - output = output + tf.divide( - tf.add_n(m_lattice_outputs), monotonic_num_lattices) - if n_lattice_outputs is not None: - output = output + tf.divide( - tf.add_n(n_lattice_outputs), non_monotonic_num_lattices) - - return (output, projections, regularization) - - -class _CalibratedEtl(calibrated_lib.Calibrated): - """Base class for CalibratedEtl{Classifier|Regressor}.""" - - def __init__(self, - n_classes, - feature_columns=None, - model_dir=None, - quantiles_dir=None, - keypoints_initializers_fn=None, - optimizer=None, - config=None, - hparams=None, - feature_engineering_fn=None, - head=None, - weight_column=None): - """Construct CalibrateEtlClassifier/Regressor.""" - if not hparams: - hparams = tfl_hparams.CalibratedEtlHParams([]) - self.check_hparams(hparams) - hparams = self._adjust_calibration_params(hparams) - - super(_CalibratedEtl, - self).__init__(n_classes, feature_columns, model_dir, quantiles_dir, - keypoints_initializers_fn, optimizer, config, hparams, - head, weight_column, 'etl') - # After initialization, we expect model_dir exists. 
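-    # The superclass constructor is expected to have resolved model_dir by
-    # this point; fail fast if it has not.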
- if self._model_dir is None: - raise ValueError('model_dir is not created') - - def _check_lattices_params(self, hparams): - """Check lattice parameters.""" - monotonic_num_lattices = hparams.get_param('monotonic_num_lattices') - monotonic_lattice_rank = hparams.get_param('monotonic_lattice_rank') - monotonic_lattice_size = hparams.get_param('monotonic_lattice_size') - non_monotonic_num_lattices = hparams.get_param('non_monotonic_num_lattices') - non_monotonic_lattice_rank = hparams.get_param('non_monotonic_lattice_rank') - non_monotonic_lattice_size = hparams.get_param('non_monotonic_lattice_size') - - error_messages = [] - if monotonic_num_lattices is None and non_monotonic_num_lattices is None: - error_messages.append('At least one of monotonic_num_lattices or ' - 'non_monotonic_num_lattices should be provided') - - if monotonic_num_lattices: - if monotonic_lattice_rank is None: - error_messages.append('monotonic_lattice_rank should be specified.') - if monotonic_lattice_size is None: - error_messages.append('monotonic_lattice_size should be specified.') - elif monotonic_lattice_size < 2: - error_messages.append( - 'monotonic_lattice_size cannot be less than 2, but got %d' % - monotonic_lattice_size) - - if non_monotonic_num_lattices: - if non_monotonic_lattice_rank is None: - error_messages.append('non_monotonic_lattice_rank should be specified.') - if non_monotonic_lattice_size is None: - error_messages.append('non_monotonic_lattice_size should be specified.') - elif non_monotonic_lattice_size < 2: - error_messages.append( - 'non_monotonic_lattice_size cannot be less than 2, but got %d' % - non_monotonic_lattice_size) - - return error_messages - - def _adjust_calibration_params(self, hparams): - """Makes sure we have the correct input calibration set up.""" - hparams = copy.deepcopy(hparams) - hparams.set_param('calibration_output_min', -1.) - hparams.set_param('calibration_output_max', 1.) - return hparams - - def _check_not_allowed_feature_params(self, hparams): - not_allowed_feature_params = map( - 'lattice_{}'.format, - regularizers.LATTICE_MULTI_DIMENSIONAL_REGULARIZERS) - error_messages = [] - for param in not_allowed_feature_params: - for feature_name in hparams.get_feature_names(): - if hparams.is_feature_set_param(feature_name, param): - error_messages.append('feature %s sets %s, which is not allowed.' % - (feature_name, param)) - return error_messages - - def _check_per_feature_param_configuration(self, monotonicity, - calibration_bound): - """Check parameter configuration and returns the error messages.""" - error_messages = [] - - if monotonicity not in {-1, 0, +1}: - error_messages.append('monotonicity should be an integer {-1, 0, +1} ' - 'but is %s' % monotonicity) - - if not calibration_bound: - error_messages.append( - 'A deep lattice network expects an bounded input from a calibration ' - 'layer, but calibration_bound is set to be False') - - return error_messages - - def check_hparams(self, hparams): - """Check pre-conditions of hparams. - - Args: - hparams: (tfl_hparams.CalibratedEtlHParams) Hyperparameter to be - examined. - - Raises: - ValueError: If the hyperparameter configuration is invalid, for example - calibration_monotonic is None, but lattice_monotonic is True, then raise - the error with a root cause. - """ - error_messages = self._check_lattices_params(hparams) - # Check global params. 
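-    # get_global_and_feature_params() returns the global/default values first,
-    # followed by one tuple of values per feature that overrides any of them.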
-    feature_names = hparams.get_feature_names()
-    packed_feature_values = hparams.get_global_and_feature_params(
-        ['monotonicity', 'calibration_bound'], feature_names)
-    default_feature_values, per_feature_values = packed_feature_values
-    param_error_messages = self._check_per_feature_param_configuration(
-        *default_feature_values)
-    if param_error_messages:
-      error_messages.append('Error message for default feature param:')
-      error_messages += param_error_messages
-
-    # Check per-feature params. hparams.get_feature_names() will only return
-    # feature names that set per-feature parameters.
-    for feature_idx in range(len(per_feature_values)):
-      param_error_messages = self._check_per_feature_param_configuration(
-          *per_feature_values[feature_idx])
-      if param_error_messages:
-        error_messages.append(
-            'Error message for %s feature param:' % feature_names[feature_idx])
-        error_messages += param_error_messages
-
-    if error_messages:
-      raise ValueError(
-          'Hyperparameter configuration cannot be used in the calibrated '
-          'etl estimator. Error messages report the issue per feature, but'
-          ' the parameter may be inherited from a global parameter.\nDetailed '
-          'error messages:\n%s' % '\n'.join(error_messages))
-
-  def calibration_structure_builder(self, columns_to_tensors, hparams):
-    """Returns the calibration structure of the model. See base class."""
-    return None
-
-  def prediction_builder_from_calibrated(
-      self, mode, per_dimension_feature_names, hparams, calibrated):
-    """Constructs the prediction."""
-    self.check_hparams(hparams)
-    lattice_monotonic = [(hparams.get_feature_param(f, 'monotonicity') != 0)
-                         for f in per_dimension_feature_names]
-    monotonic_num_lattices = hparams.get_param('monotonic_num_lattices')
-    monotonic_lattice_rank = hparams.get_param('monotonic_lattice_rank')
-    monotonic_lattice_size = hparams.get_param('monotonic_lattice_size')
-    non_monotonic_num_lattices = hparams.get_param('non_monotonic_num_lattices')
-    non_monotonic_lattice_rank = hparams.get_param('non_monotonic_lattice_rank')
-    non_monotonic_lattice_size = hparams.get_param('non_monotonic_lattice_size')
-    linear_embedding_calibration_min = hparams.get_param(
-        'linear_embedding_calibration_min')
-    linear_embedding_calibration_max = hparams.get_param(
-        'linear_embedding_calibration_max')
-    linear_embedding_calibration_num_keypoints = hparams.get_param(
-        'linear_embedding_calibration_num_keypoints')
-    interpolation_type = hparams.get_param('interpolation_type')
-
-    # Set up the regularization.
-    regularizer_amounts = {}
-    for regularizer_name in regularizers.LATTICE_REGULARIZERS:
-      regularizer_amounts[regularizer_name] = hparams.get_param(
-          'lattice_{}'.format(regularizer_name))
-
-    input_dim = len(per_dimension_feature_names)
-    output_dim = 1
-    return _embedded_lattices(
-        calibrated,
-        input_dim,
-        output_dim,
-        interpolation_type,
-        monotonic_num_lattices,
-        monotonic_lattice_rank,
-        monotonic_lattice_size,
-        non_monotonic_num_lattices,
-        non_monotonic_lattice_rank,
-        non_monotonic_lattice_size,
-        linear_embedding_calibration_min,
-        linear_embedding_calibration_max,
-        linear_embedding_calibration_num_keypoints,
-        regularizer_amounts,
-        is_monotone=lattice_monotonic)
-
-
-def calibrated_etl_classifier(feature_columns=None,
-                              model_dir=None,
-                              quantiles_dir=None,
-                              keypoints_initializers_fn=None,
-                              optimizer=None,
-                              config=None,
-                              hparams=None,
-                              head=None,
-                              weight_column=None):
-  """Calibrated etl binary classifier model.
-
-
-
-  This model uses a piecewise lattice calibration function on each of the
-  inputs (parametrized) and then feeds them to an ensemble of random lattices.
-  num_lattices and lattice_rank (number of inputs to each lattice) must be
-  specified in the hyperparameters. Optionally calibration can be made
-  monotonic.
-
-  It usually requires a preprocessing step on the data, to calculate the
-  quantiles of each used feature. This can be done locally or in one worker
-  only before training, in a separate invocation of your program (or
-  directly). Typically this can be saved (`save_dir` parameter) to the same
-  directory where the data is.
-
-  Hyper-parameters are given in the form of the object
-  tfl_hparams.CalibratedEtlHParams. lattice_rank and num_lattices must
-  be specified; there are no default values for these. It also takes in
-  per-feature parameters.
-
-  Internally values will be converted to tf.float32.
-
-  Example:
-
-  ```python
-  def input_fn_train(): ...
-  def input_fn_eval(): ...
-
-  my_feature_columns=[...]
-
-  # Have a separate program flag to generate the quantiles. Needs to be run
-  # only once.
-  if FLAGS.create_quantiles:
-    pwl_calibration_layers.calculate_quantiles_for_keypoints(
-        input_fn=input_fn_train,
-        feature_columns=my_feature_columns,
-        save_dir=FLAGS.data_dir,
-        num_quantiles=1000,
-        override=True)
-    return  # Exit program.
-
-  hparams = tfl_hparams.CalibratedEtlHParams(num_lattices=10, lattice_rank=2)
-  estimator = calibrated_etl.calibrated_etl_classifier(
-      feature_columns=feature_columns, hparams=hparams)
-  estimator.train(input_fn=input_fn_train)
-  estimator.evaluate(input_fn=input_fn_eval)
-  estimator.predict(input_fn=input_fn_predict)
-  ```
-
-  Args:
-    feature_columns: Optional, an iterable containing all the feature
-      columns used by the model. All items in the set should be instances of
-      classes derived from `FeatureColumn`. If not given, the model will
-      use as features the tensors returned by input_fn.
-      Supported types of columns: RealValuedColumn.
-    model_dir: Directory to save model parameters, graphs, etc. This can
-      also be used to load checkpoints from the directory into an estimator to
-      continue training a previously saved model.
-    quantiles_dir: location where quantiles for the data were saved. Typically
-      the same directory as the training data. These quantiles can be
-      generated only once with
-      `pwl_calibration_layers.calculate_quantiles_for_keypoints` in a separate
-      invocation of your program. If you don't want to use quantiles, you can
-      set `keypoints_initializer` instead.
-    keypoints_initializers_fn: if you know the distribution of your
-      input features you can provide that directly instead of `quantiles_dir`.
-      See `pwl_calibration_layers.uniform_keypoints_for_signal`. It must be
-      a closure that returns a pair of tensors with keypoints inputs and
-      outputs to use for initialization (must match `num_keypoints` configured
-      in `hparams`). Alternatively the closure can return a dict mapping
-      feature name to pairs for initialization per feature. If `quantiles_dir`
-      and `keypoints_initializers_fn` are set, the latter takes precedence,
-      and the features for which `keypoints_initializers` are not defined
-      fall back to using the quantiles found in `quantiles_dir`. It uses a
-      closure instead of the tensors themselves because the graph has to be
-      created at the time the model is being built, which happens at a later
-      time (see the sketch below).
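The closure contract above is easiest to see in miniature. The following sketch is an editor's illustration mirroring the pattern used by this patch's own tests, not part of the deleted file; `make_init_fn` is a hypothetical helper name, while `uniform_keypoints_for_signal` and the returned (input keypoints, output keypoints) pair come from the library itself. The keypoint count is assumed to match `num_keypoints` in `hparams`:

```python
from tensorflow_lattice.python.lib import keypoints_initialization


def make_init_fn(num_keypoints):
  """Returns a closure producing (input_keypoints, output_keypoints)."""

  def init_fn():
    # Uniformly spaced input keypoints in [-1, 1] mapped to outputs in [0, 1].
    return keypoints_initialization.uniform_keypoints_for_signal(
        num_keypoints, -1., 1., 0., 1.)

  return init_fn


# Passed at estimator construction; called later, when the graph is built:
# calibrated_etl_classifier(..., keypoints_initializers_fn=make_init_fn(50))
```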
-    optimizer: string, `Optimizer` object, or callable that defines the
-      optimizer to use for training -- if a callable, it will be called with
-      learning_rate=hparams.learning_rate.
-    config: RunConfig object to configure the runtime settings. Typically set
-      to learn_runner.EstimatorConfig().
-    hparams: an instance of tfl_hparams.CalibratedEtlHParams. If set to
-      None, default parameters are used.
-    head: a `TensorFlow Estimator Head` which specifies how the loss function,
-      final predictions, and so on are generated from model outputs. Defaults
-      to using a sigmoid cross entropy head for binary classification and a
-      mean squared error head for regression.
-    weight_column: A string or a `tf.feature_column.numeric_column` defining
-      a feature column representing weights. It is used to down-weight or
-      boost examples during training. It will be multiplied by the loss of
-      the example.
-
-  Returns:
-    A `calibrated_etl_classifier` estimator.
-
-  Raises:
-    ValueError: invalid parameters.
-    KeyError: type of feature not supported.
-  """
-  return _CalibratedEtl(
-      n_classes=2,
-      feature_columns=feature_columns,
-      model_dir=model_dir,
-      quantiles_dir=quantiles_dir,
-      keypoints_initializers_fn=keypoints_initializers_fn,
-      optimizer=optimizer,
-      config=config,
-      hparams=hparams,
-      head=head,
-      weight_column=weight_column)
-
-
-def calibrated_etl_regressor(feature_columns=None,
-                             model_dir=None,
-                             quantiles_dir=None,
-                             keypoints_initializers_fn=None,
-                             optimizer=None,
-                             config=None,
-                             hparams=None,
-                             head=None,
-                             weight_column=None):
-  """Calibrated etl regressor model.
-
-  This model uses a piecewise lattice calibration function on each of the
-  inputs (parametrized) and then feeds them to an ensemble of random lattices.
-  num_lattices and lattice_rank (number of inputs to each lattice) must be
-  specified in the hyperparameters. Optionally calibration can be made
-  monotonic.
-
-  It usually requires a preprocessing step on the data, to calculate the
-  quantiles of each used feature. This can be done locally or in one worker
-  only before training, in a separate invocation of your program (or
-  directly). Typically this can be saved (`save_dir` parameter) to the same
-  directory where the data is.
-
-  Hyper-parameters are given in the form of the object
-  tfl_hparams.CalibratedEtlHParams. lattice_rank and num_lattices must
-  be specified; there are no default values for these. It also takes in
-  per-feature parameters.
-
-  Internally values will be converted to tf.float32.
-
-  Example:
-
-  ```python
-  def input_fn_train(): ...
-  def input_fn_eval(): ...
-
-  my_feature_columns=[...]
-
-  # Have a separate program flag to generate the quantiles. Needs to be run
-  # only once.
-  if FLAGS.create_quantiles:
-    pwl_calibration_layers.calculate_quantiles_for_keypoints(
-        input_fn=input_fn_train,
-        feature_columns=my_feature_columns,
-        save_dir=FLAGS.data_dir,
-        num_quantiles=1000,
-        override=True)
-    return  # Exit program.
-
-  hparams = tfl_hparams.CalibratedEtlHParams(num_lattices=10, lattice_rank=2)
-  estimator = calibrated_etl.calibrated_etl_regressor(
-      feature_columns=feature_columns, hparams=hparams)
-  estimator.train(input_fn=input_fn_train)
-  estimator.evaluate(input_fn=input_fn_eval)
-  estimator.predict(input_fn=input_fn_predict)
-  ```
-
-  Args:
-    feature_columns: Optional, an iterable containing all the feature
-      columns used by the model. All items in the set should be instances of
-      classes derived from `FeatureColumn`. If not given, the model will
-      use as features the tensors returned by input_fn.
-      Supported types of columns: RealValuedColumn.
-    model_dir: Directory to save model parameters, graphs, etc. This can
-      also be used to load checkpoints from the directory into an estimator to
-      continue training a previously saved model.
-    quantiles_dir: location where quantiles for the data were saved. Typically
-      the same directory as the training data. These quantiles can be
-      generated only once with
-      `pwl_calibration_layers.calculate_quantiles_for_keypoints` in a separate
-      invocation of your program. If you don't want to use quantiles, you can
-      set `keypoints_initializer` instead.
-    keypoints_initializers_fn: if you know the distribution of your
-      input features you can provide that directly instead of `quantiles_dir`.
-      See `pwl_calibration_layers.uniform_keypoints_for_signal`. It must be
-      a closure that returns a pair of tensors with keypoints inputs and
-      outputs to use for initialization (must match `num_keypoints` configured
-      in `hparams`). Alternatively the closure can return a dict mapping
-      feature name to pairs for initialization per feature. If `quantiles_dir`
-      and `keypoints_initializers_fn` are set, the latter takes precedence,
-      and the features for which `keypoints_initializers` are not defined
-      fall back to using the quantiles found in `quantiles_dir`. It uses a
-      closure instead of the tensors themselves because the graph has to be
-      created at the time the model is being built, which happens at a later
-      time.
-    optimizer: string, `Optimizer` object, or callable that defines the
-      optimizer to use for training -- if a callable, it will be called with
-      learning_rate=hparams.learning_rate.
-    config: RunConfig object to configure the runtime settings. Typically set
-      to learn_runner.EstimatorConfig().
-    hparams: an instance of tfl_hparams.CalibratedEtlHParams. If set to
-      None, default parameters are used.
-    head: a `TensorFlow Estimator Head` which specifies how the loss function,
-      final predictions, and so on are generated from model outputs. Defaults
-      to using a sigmoid cross entropy head for binary classification and a
-      mean squared error head for regression.
-    weight_column: A string or a `tf.feature_column.numeric_column` defining
-      a feature column representing weights. It is used to down-weight or
-      boost examples during training. It will be multiplied by the loss of
-      the example.
-
-  Returns:
-    A `calibrated_etl_regressor` estimator.
-
-  Raises:
-    ValueError: invalid parameters.
-    KeyError: type of feature not supported.
-  """
-  return _CalibratedEtl(
-      n_classes=0,
-      feature_columns=feature_columns,
-      model_dir=model_dir,
-      quantiles_dir=quantiles_dir,
-      keypoints_initializers_fn=keypoints_initializers_fn,
-      optimizer=optimizer,
-      config=config,
-      hparams=hparams,
-      head=head,
-      weight_column=weight_column)
diff --git a/tensorflow_lattice/python/estimators/calibrated_etl_test.py b/tensorflow_lattice/python/estimators/calibrated_etl_test.py
deleted file mode 100644
index 71bb39e..0000000
--- a/tensorflow_lattice/python/estimators/calibrated_etl_test.py
+++ /dev/null
@@ -1,389 +0,0 @@
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""CalibratedEtl tests.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf - -from tensorflow_lattice.python.estimators import calibrated_etl -from tensorflow_lattice.python.estimators import hparams as tfl_hparams -from tensorflow_lattice.python.lib import keypoints_initialization -from tensorflow_lattice.python.lib import test_data - -_NUM_KEYPOINTS = 50 - - -class CalibratedEtlHParamsTest(tf.test.TestCase): - - def testEmptyMonotonicLatticeRankExpectsError(self): - hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x']) - hparams.set_param('monotonic_num_lattices', 2) - hparams.set_param('monotonic_lattice_size', 2) - with self.assertRaisesRegexp( - ValueError, - 'Hyperparameter configuration cannot be used in the calibrated etl ' - 'estimator.'): - calibrated_etl.calibrated_etl_classifier(hparams=hparams) - - def testEmptyMonotonicLatticeSizeExpectsError(self): - hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x']) - hparams.set_param('monotonic_num_lattices', 2) - hparams.set_param('monotonic_lattice_rank', 2) - with self.assertRaisesRegexp( - ValueError, - 'Hyperparameter configuration cannot be used in the calibrated etl ' - 'estimator.'): - calibrated_etl.calibrated_etl_classifier(hparams=hparams) - - def testEmptyNonMonotonicLatticeRankExpectsError(self): - hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x']) - hparams.set_param('non_monotonic_num_lattices', 2) - hparams.set_param('non_monotonic_lattice_size', 2) - with self.assertRaisesRegexp( - ValueError, - 'Hyperparameter configuration cannot be used in the calibrated etl ' - 'estimator.'): - calibrated_etl.calibrated_etl_classifier(hparams=hparams) - - def testEmptyNonMonotonicLatticeSizeExpectsError(self): - hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x']) - hparams.set_param('non_monotonic_num_lattices', 2) - hparams.set_param('non_monotonic_lattice_rank', 2) - with self.assertRaisesRegexp( - ValueError, - 'Hyperparameter configuration cannot be used in the calibrated etl ' - 'estimator.'): - calibrated_etl.calibrated_etl_classifier(hparams=hparams) - - with self.assertRaisesRegexp( - ValueError, - 'Hyperparameter configuration cannot be used in the calibrated etl ' - 'estimator.'): - calibrated_etl.calibrated_etl_classifier(hparams=hparams) - - -class CalibratedEtlTest(tf.test.TestCase): - - def setUp(self): - super(CalibratedEtlTest, self).setUp() - self._test_data = test_data.TestData() - - def _CalibratedEtlRegressor(self, - feature_names, - feature_columns, - weight_column=None, - **hparams_args): - - def init_fn(): - return keypoints_initialization.uniform_keypoints_for_signal( - _NUM_KEYPOINTS, -1., 1., 0., 1.) 
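-    # init_fn pins _NUM_KEYPOINTS uniformly spaced keypoints that map inputs
-    # in [-1, 1] to outputs in [0, 1]; it is passed below as
-    # keypoints_initializers_fn, so the tests need no quantiles_dir.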
- - hparams = tfl_hparams.CalibratedEtlHParams( - feature_names, - num_keypoints=_NUM_KEYPOINTS, - monotonic_num_lattices=1, - monotonic_lattice_rank=1, - monotonic_lattice_size=2, - non_monotonic_num_lattices=1, - non_monotonic_lattice_rank=1, - non_monotonic_lattice_size=2, - **hparams_args) - # Turn off monotonic calibrator. - hparams.set_param('calibration_monotonic', None) - hparams.set_param('learning_rate', 0.1) - - return calibrated_etl.calibrated_etl_regressor( - feature_columns=feature_columns, - weight_column=weight_column, - hparams=hparams, - keypoints_initializers_fn=init_fn) - - def _CalibratedEtlClassifier(self, feature_columns, **hparams_args): - - def init_fn(): - return keypoints_initialization.uniform_keypoints_for_signal( - _NUM_KEYPOINTS, -1., 1., 0., 1.) - - hparams = tfl_hparams.CalibratedEtlHParams( - num_keypoints=_NUM_KEYPOINTS, - monotonic_num_lattices=1, - monotonic_lattice_rank=1, - monotonic_lattice_size=2, - non_monotonic_num_lattices=1, - non_monotonic_lattice_rank=1, - non_monotonic_lattice_size=2, - **hparams_args) - # Turn off monotonic calibrator. - hparams.set_param('calibration_monotonic', None) - hparams.set_param('learning_rate', 0.1) - - return calibrated_etl.calibrated_etl_classifier( - feature_columns=feature_columns, - hparams=hparams, - keypoints_initializers_fn=init_fn) - - def testCalibratedEtlRegressorTraining1D(self): - feature_columns = [ - tf.feature_column.numeric_column('x'), - ] - estimator = self._CalibratedEtlRegressor(['x'], - feature_columns, - interpolation_type='simplex') - estimator.train(input_fn=self._test_data.oned_input_fn()) - # Here we only check the successful evaluation. - # Checking the actual number, accuracy, etc, makes the test too flaky. - _ = estimator.evaluate(input_fn=self._test_data.oned_input_fn()) - - def testCalibratedEtlRegressorWeightedTraining1D(self): - feature_columns = [tf.feature_column.numeric_column('x')] - weight_column = tf.feature_column.numeric_column('zero') - estimator = self._CalibratedEtlRegressor(['x'], - feature_columns, - weight_column=weight_column) - estimator.train(input_fn=self._test_data.oned_zero_weight_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.oned_zero_weight_input_fn()) - # Expects almost zero since the weight values are exactly zero. - self.assertLess(results['average_loss'], 1e-7) - - def testCalibratedEtlRegressorTraining2D(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedEtlRegressor(['x0', 'x1'], - feature_columns, - interpolation_type='hypercube') - estimator.train(input_fn=self._test_data.twod_input_fn()) - # Here we only check the successful evaluation. - # Checking the actual number, accuracy, etc, makes the test too flaky. - _ = estimator.evaluate(input_fn=self._test_data.twod_input_fn()) - - def testCalibratedEtlRegressorTraining2DWithCalbrationRegularization(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedEtlRegressor(['x0', 'x1'], - feature_columns, - interpolation_type='simplex', - calibration_l1_reg=1e-2, - calibration_l2_reg=1e-2, - calibration_l1_laplacian_reg=0.05, - calibration_l2_laplacian_reg=0.01) - estimator.train(input_fn=self._test_data.twod_input_fn()) - # Here we only check the successful evaluation. - # Checking the actual number, accuracy, etc, makes the test too flaky. 
- _ = estimator.evaluate(input_fn=self._test_data.twod_input_fn()) - - def testCalibratedEtlRegressorTraining2DWithLatticeRegularizer(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedEtlRegressor(['x0', 'x1'], - feature_columns, - interpolation_type='simplex', - lattice_l1_reg=1.0, - lattice_l2_reg=1.0, - lattice_l1_torsion_reg=1.0, - lattice_l2_torsion_reg=1.0, - lattice_l1_laplacian_reg=1.0, - lattice_l2_laplacian_reg=1.0) - estimator.train(input_fn=self._test_data.twod_input_fn()) - # Here we only check the successful evaluation. - # Checking the actual number, accuracy, etc, makes the test too flaky. - _ = estimator.evaluate(input_fn=self._test_data.twod_input_fn()) - - def testCalibratedEtlRegressorTrainingMultiDimensionalFeature(self): - feature_columns = [ - tf.feature_column.numeric_column('x', shape=(2,)), - ] - estimator = self._CalibratedEtlRegressor(['x'], feature_columns) - estimator.train(input_fn=self._test_data.multid_feature_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.multid_feature_input_fn()) - self.assertLess(results['average_loss'], 1e-2) - - # Turn-off calibration for feature 'x', it should turn off for both - # dimensions, and the results should get much worse. - estimator = self._CalibratedEtlRegressor(['x'], - feature_columns, - feature__x__num_keypoints=0) - estimator.train(input_fn=self._test_data.multid_feature_input_fn()) - # Here we only check the successful evaluation. - # Checking the actual number, accuracy, etc, makes the test too flaky. - _ = estimator.evaluate(input_fn=self._test_data.multid_feature_input_fn()) - - def testCalibratedEtlClassifierTraining(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedEtlClassifier(feature_columns) - estimator.train(input_fn=self._test_data.twod_classificer_input_fn()) - # Here we only check the successful evaluation. - # Checking the actual number, accuracy, etc, makes the test too flaky. - _ = estimator.evaluate(input_fn=self._test_data.twod_classificer_input_fn()) - - def testCalibratedEtlClassifierTrainingWithCalibrationRegularizer(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedEtlClassifier( - feature_columns, - calibration_l1_reg=1e-2, - calibration_l2_reg=1e-2, - calibration_l1_laplacian_reg=1e-1, - calibration_l2_laplacian_reg=1e-1, - interpolation_type='hypercube') - - estimator.train(input_fn=self._test_data.twod_classificer_input_fn()) - # Here we only check the successful evaluation. - # Checking the actual number, accuracy, etc, makes the test too flaky. - _ = estimator.evaluate(input_fn=self._test_data.twod_classificer_input_fn()) - - def testCalibratedEtlClassifierTrainingWithLatticeRegularizer(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedEtlClassifier( - feature_columns, - lattice_l1_reg=1.0, - lattice_l2_reg=1.0, - lattice_l1_torsion_reg=1.0, - lattice_l2_torsion_reg=1.0, - lattice_l1_laplacian_reg=1.0, - lattice_l2_laplacian_reg=1.0, - interpolation_type='hypercube') - - estimator.train(input_fn=self._test_data.twod_classificer_input_fn()) - # Here we only check the successful evaluation. - # Checking the actual number, accuracy, etc, makes the test too flaky. 
- _ = estimator.evaluate(input_fn=self._test_data.twod_classificer_input_fn()) - - def testCalibratedEtlMonotonicClassifierTraining(self): - # Construct the following training pair. - # - # Training: (x, y) - # ([0., 0.], 0.0) - # ([0., 1.], 1.0) - # ([1., 0.], 1.0) - # ([1., 1.], 0.0) - # - # which is not a monotonic function. Then check the forcing monotonicity - # resulted in the following monotonicity or not. - # f(0, 0) <= f(0, 1), f(0, 0) <= f(1, 0), f(0, 1) <= f(1, 1), - # f(1, 0) < = f(1, 1). - x0 = np.array([0.0, 0.0, 1.0, 1.0]) - x1 = np.array([0.0, 1.0, 0.0, 1.0]) - x_samples = {'x0': x0, 'x1': x1} - training_y = np.array([[False], [True], [True], [False]]) - - train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x=x_samples, y=training_y, batch_size=4, num_epochs=1000, shuffle=False) - test_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x=x_samples, y=None, shuffle=False) - - # Define monotonic lattice classifier. - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - - def init_fn(): - return keypoints_initialization.uniform_keypoints_for_signal( - 2, 0., 1., 0., 1.) - - hparams = tfl_hparams.CalibratedEtlHParams( - num_keypoints=2, - monotonic_num_lattices=2, - monotonic_lattice_rank=2, - monotonic_lattice_size=2) - hparams.set_param('calibration_monotonic', +1) - hparams.set_param('lattice_monotonic', True) - hparams.set_param('learning_rate', 0.1) - - estimator = calibrated_etl.calibrated_etl_classifier( - feature_columns=feature_columns, - hparams=hparams, - keypoints_initializers_fn=init_fn) - estimator.train(input_fn=train_input_fn) - predictions = [ - results['logits'][0] - for results in estimator.predict(input_fn=test_input_fn) - ] - - self.assertEqual(len(predictions), 4) - # Check monotonicity. Note that projection has its own precision, so we - # add a small number. - self.assertLess(predictions[0], predictions[1] + 1e-4) - self.assertLess(predictions[0], predictions[2] + 1e-4) - self.assertLess(predictions[1], predictions[3] + 1e-4) - self.assertLess(predictions[2], predictions[3] + 1e-4) - - def testCalibratedEtlWithMissingTraining(self): - # x0 is missing with it's own vertex: so it can take very different values, - # while x1 is missing and calibrated, in this case to the middle of the - # lattice. - x0 = np.array([0., 0., 1., 1., -1., -1., 0., 1.]) - x1 = np.array([0., 1., 0., 1., 0., 1., -1., -1.]) - training_y = np.array([1., 3., 7., 11., 23., 27., 2., 9.]) - x_samples = {'x0': x0, 'x1': x1} - - train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x=x_samples, - y=training_y, - batch_size=x0.shape[0], - num_epochs=2000, - shuffle=False) - test_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x=x_samples, y=training_y, shuffle=False) - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - - def init_fn(): - return keypoints_initialization.uniform_keypoints_for_signal( - 2, 0., 1., 0., 1.) - - hparams = tfl_hparams.CalibratedEtlHParams(['x0', 'x1'], - num_keypoints=2, - non_monotonic_num_lattices=5, - non_monotonic_lattice_rank=2, - non_monotonic_lattice_size=2, - learning_rate=0.1, - missing_input_value=-1.) - - estimator = calibrated_etl.calibrated_etl_regressor( - feature_columns=feature_columns, - hparams=hparams, - keypoints_initializers_fn=init_fn) - - estimator.train(input_fn=train_input_fn) - # Here we only check the successful evaluation. 
-    # Checking the actual number, accuracy, etc., makes the test too flaky.
-    _ = estimator.evaluate(input_fn=test_input_fn)
-
-
-if __name__ == '__main__':
-  tf.test.main()
diff --git a/tensorflow_lattice/python/estimators/calibrated_lattice.py b/tensorflow_lattice/python/estimators/calibrated_lattice.py
deleted file mode 100644
index ac4c4dd..0000000
--- a/tensorflow_lattice/python/estimators/calibrated_lattice.py
+++ /dev/null
@@ -1,460 +0,0 @@
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""CalibratedLattice canned estimators."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import copy
-
-from tensorflow_lattice.python.estimators import calibrated as calibrated_lib
-from tensorflow_lattice.python.estimators import hparams as tfl_hparams
-from tensorflow_lattice.python.lib import lattice_layers
-from tensorflow_lattice.python.lib import regularizers
-
-_EPSILON = 1e-7
-
-
-class _CalibratedLattice(calibrated_lib.Calibrated):
-  """Base class for CalibratedLattice{Classifier|Regressor}."""
-
-  def __init__(self,
-               n_classes,
-               feature_columns=None,
-               model_dir=None,
-               quantiles_dir=None,
-               keypoints_initializers_fn=None,
-               lattice_initializers_fn=None,
-               optimizer=None,
-               config=None,
-               hparams=None,
-               head=None,
-               weight_column=None):
-    """Construct CalibratedLatticeClassifier/Regressor."""
-    if not hparams:
-      hparams = tfl_hparams.CalibratedLatticeHParams([])
-    self.check_hparams(hparams)
-    hparams = self._set_calibration_params(hparams)
-
-    self.lattice_initializers_fn_ = lattice_initializers_fn
-
-    super(_CalibratedLattice,
-          self).__init__(n_classes, feature_columns, model_dir, quantiles_dir,
-                         keypoints_initializers_fn, optimizer, config, hparams,
-                         head, weight_column, 'lattice')
-
-  def _check_param_configuration(self, adjusted, monotonicity, lattice_size,
-                                 calibration_output_min, calibration_output_max,
-                                 calibration_bound, missing_input_value,
-                                 missing_vertex, *unused_args):
-    error_messages = []
-    if monotonicity not in {-1, 0, +1}:
-      error_messages.append('monotonicity should be an integer {-1, 0, +1} '
-                            'but is %s' % monotonicity)
-    if lattice_size < 2:
-      error_messages.append('lattice_size should be greater than or equal to '
-                            '2 but is %d' % (lattice_size))
-
-    if not calibration_bound:
-      error_messages.append(
-          'A lattice expects a bounded input from a calibration layer, but '
-          'calibration_bound is set to be False')
-
-    if not adjusted:
-      if calibration_output_min is not None:
-        error_messages.append(
-            'calibration_output_min=%d should not be set, it is adjusted '
-            'automatically to match the lattice_size' % calibration_output_min)
-      if calibration_output_max is not None:
-        error_messages.append(
-            'calibration_output_max=%d should not be set, it is adjusted '
-            'automatically to match the lattice_size' % calibration_output_max)
-
-    if missing_input_value is None and
missing_vertex: - error_messages.append( - 'missing_vertex is True, however missing_input_value not set') - - return error_messages - - def _check_not_allowed_feature_params(self, hparams): - not_allowed_feature_params = map( - 'lattice_{}'.format, - regularizers.LATTICE_MULTI_DIMENSIONAL_REGULARIZERS) - error_messages = [] - for param in not_allowed_feature_params: - for feature_name in hparams.get_feature_names(): - if hparams.is_feature_set_param(feature_name, param): - error_messages.append('feature %s sets %s, which is not allowed.' % - (feature_name, param)) - return error_messages - - def check_hparams(self, hparams, adjusted=False): - """Check pre-conditions of hparams. - - Args: - hparams: (tfl_hparams.CalibratedLatticeHParams) Hyperparameter to - be examined. - adjusted: if these are the parameters already adjusted - Raises: - ValueError: If the hyperparameter configuration is invalid, for example - calibration_monotonic is None, but lattice_monotonic is True, then raise - the error with a root cause. - """ - error_messages = self._check_not_allowed_feature_params(hparams) - - # Check global params. - feature_names = hparams.get_feature_names() - param_list = [ - 'monotonicity', - 'lattice_size', - 'calibration_output_min', - 'calibration_output_max', - 'calibration_bound', - 'missing_input_value', - 'missing_vertex', - ] + ['lattice_{}'.format(r) for r in regularizers.LATTICE_REGULARIZERS] - - global_values, per_feature_values = hparams.get_global_and_feature_params( - param_list, feature_names) - global_param_error_messages = self._check_param_configuration( - adjusted, *global_values) - if global_param_error_messages: - error_messages.append('Error message for global param:') - error_messages += global_param_error_messages - - # Check per feature params. hparams.get_feature_names() will only return - # feature names that sets per feature parameters. - for feature_idx in range(len(per_feature_values)): - per_feature_param_error_messages = self._check_param_configuration( - adjusted, *per_feature_values[feature_idx]) - if per_feature_param_error_messages: - error_messages.append( - 'Error message for %s feature param:' % feature_names[feature_idx]) - error_messages += per_feature_param_error_messages - - if error_messages: - raise ValueError( - 'Hyperparameter configuration cannot be used in the calibrated ' - 'lattice estimator. 
Error messages report the issue per feature, but'
-          ' the parameter may be inherited from a global parameter.\nDetailed '
-          'error messages:\n%s' % '\n'.join(error_messages))
-
-  def _set_calibration_params(self, hparams):
-    hparams = copy.deepcopy(hparams)
-    feature_names = hparams.get_feature_names()
-    global_values, per_feature_values = hparams.get_global_and_feature_params(
-        ['lattice_size', 'missing_input_value', 'missing_vertex'],
-        feature_names)
-
-    final_lattice_size, missing_output_value = self._calibration_params(
-        *global_values)
-    lattice_size = global_values[0]
-    hparams.set_param('calibration_output_min', 0)
-    hparams.set_param('calibration_output_max', lattice_size - 1)
-    hparams.set_param('final_lattice_size', final_lattice_size)
-    hparams.set_param('missing_output_value', missing_output_value)
-
-    for feature_idx in range(len(per_feature_values)):
-      feature_name = feature_names[feature_idx]
-      final_lattice_size, missing_output_value = self._calibration_params(
-          *per_feature_values[feature_idx])
-      lattice_size = per_feature_values[feature_idx][0]
-      hparams.set_feature_param(feature_name, 'calibration_output_min', 0)
-      hparams.set_feature_param(feature_name, 'calibration_output_max',
-                                lattice_size - 1)
-      hparams.set_feature_param(feature_name, 'final_lattice_size',
-                                final_lattice_size)
-      hparams.set_feature_param(feature_name, 'missing_output_value',
-                                missing_output_value)
-    return hparams
-
-  def _calibration_params(self, lattice_size, missing_input_value,
-                          missing_vertex):
-    """Returns final_lattice_size and missing_output_value."""
-    if missing_input_value is None or not missing_vertex:
-      return lattice_size, None
-
-    # Last vertex of the lattice is reserved for missing values.
-    return lattice_size + 1, lattice_size
-
-  def calibration_structure_builder(self, columns_to_tensors, hparams):
-    """Returns the calibration structure of the model. See base class."""
-    return None
-
-  def prediction_builder_from_calibrated(
-      self, mode, per_dimension_feature_names, hparams, calibrated):
-    """Constructs the prediction."""
-    self.check_hparams(hparams, adjusted=True)
-    lattice_sizes = [
-        hparams.get_feature_param(f, 'final_lattice_size')
-        for f in per_dimension_feature_names
-    ]
-    lattice_monotonic = [(hparams.get_feature_param(f, 'monotonicity') != 0)
-                         for f in per_dimension_feature_names]
-    interpolation_type = hparams.get_param('interpolation_type')
-
-    # Set up the regularization.
-    regularizer_amounts = {}
-    for reg_name in regularizers.LATTICE_MULTI_DIMENSIONAL_REGULARIZERS:
-      regularizer_amounts[reg_name] = hparams.get_param(
-          'lattice_{}'.format(reg_name))
-    for reg_name in regularizers.LATTICE_ONE_DIMENSIONAL_REGULARIZERS:
-      regularizer_amounts[reg_name] = [
-          hparams.get_feature_param(feature_name,
-                                    'lattice_{}'.format(reg_name))
-          for feature_name in per_dimension_feature_names
-      ]
-
-    packed_results = lattice_layers.lattice_layer(
-        calibrated,
-        lattice_sizes,
-        is_monotone=lattice_monotonic,
-        interpolation_type=interpolation_type,
-        lattice_initializer=self.lattice_initializers_fn_,
-        **regularizer_amounts)
-    (prediction, _, projection_ops, regularization) = packed_results
-    # Returns prediction Tensor, projection ops, and regularization.
-    return prediction, projection_ops, regularization
-
-
-def calibrated_lattice_classifier(feature_columns=None,
-                                  model_dir=None,
-                                  quantiles_dir=None,
-                                  keypoints_initializers_fn=None,
-                                  optimizer=None,
-                                  config=None,
-                                  hparams=None,
-                                  head=None,
-                                  weight_column=None):
-  """Calibrated lattice binary classifier model.
-
-
-
-  This model uses a piecewise lattice calibration function on each of the
-  real (as opposed to binary) inputs (parametrized) and then combines (sums
-  up) the results. Optionally calibration can be made monotonic.
-
-  It usually requires a preprocessing step on the data, to calculate the
-  quantiles of each used feature. This can be done locally or in one worker
-  only before training, in a separate invocation of your program (or
-  directly). Typically this can be saved (`save_dir` parameter) to the same
-  directory where the data is.
-
-  Hyper-parameters are given in the form of the object
-  tfl_hparams.CalibratedLatticeHParams. It takes in per-feature calibration
-  parameters.
-
-  Internally values will be converted to tf.float32.
-
-  Example:
-
-  ```python
-  def input_fn_train(): ...
-  def input_fn_eval(): ...
-
-  my_feature_columns=[...]
-
-  # Have a separate program flag to generate the quantiles. Needs to be run
-  # only once.
-  if FLAGS.create_quantiles:
-    pwl_calibration_layers.calculate_quantiles_for_keypoints(
-        input_fn=input_fn_train,
-        feature_columns=my_feature_columns,
-        save_dir=FLAGS.data_dir,
-        num_quantiles=1000,
-        override=True)
-    return  # Exit program.
-
-  estimator = calibrated_lattice.calibrated_lattice_classifier(
-      feature_columns=feature_columns)
-  estimator.train(input_fn=input_fn_train)
-  estimator.evaluate(input_fn=input_fn_eval)
-  estimator.predict(input_fn=input_fn_predict)
-  ```
-
-  Args:
-    feature_columns: Optional, an iterable containing all the feature
-      columns used by the model. All items in the set should be instances of
-      classes derived from `FeatureColumn`. If not given, the model will
-      use as features the tensors returned by input_fn.
-      Supported types of columns: RealValuedColumn.
-    model_dir: Directory to save model parameters, graphs, etc. This can
-      also be used to load checkpoints from the directory into an estimator to
-      continue training a previously saved model.
-    quantiles_dir: location where quantiles for the data were saved. Typically
-      the same directory as the training data. These quantiles can be
-      generated only once with
-      `pwl_calibration_layers.calculate_quantiles_for_keypoints` in a separate
-      invocation of your program. If you don't want to use quantiles, you can
-      set `keypoints_initializer` instead.
-    keypoints_initializers_fn: if you know the distribution of your
-      input features you can provide that directly instead of `quantiles_dir`.
-      See `pwl_calibration_layers.uniform_keypoints_for_signal`. It must be
-      a closure that, when called, returns a pair of tensors with keypoint
-      inputs and outputs to use for initialization. Alternatively it can be
-      given as a dict mapping feature name to keypoints_initializers_fn, so
-      one can have one initialization per feature. It uses a closure instead
-      of the tensors themselves because the graph has to be created at the
-      time the model is being built, which happens at a later time.
-    optimizer: string, `Optimizer` object, or callable that defines the
-      optimizer to use for training -- if a callable, it will be called with
-      learning_rate=hparams.learning_rate.
-    config: RunConfig object to configure the runtime settings. Typically set
-      to learn_runner.EstimatorConfig().
-    hparams: an instance of tfl_hparams.CalibratedLatticeHParams. If set to
-      None, default parameters are used.
-    head: a `TensorFlow Estimator Head` which specifies how the loss function,
-      final predictions, and so on are generated from model outputs.
-      Defaults to using a sigmoid cross entropy head for binary
-      classification and a mean squared error head for regression.
-    weight_column: A string or a `tf.feature_column.numeric_column` defining
-      the feature column representing weights. It is used to down-weight or
-      boost examples during training. It will be multiplied by the loss of
-      the example.
-
-  Returns:
-    A `CalibratedLatticeClassifier` estimator.
-
-  Raises:
-    ValueError: invalid parameters.
-    KeyError: type of feature not supported.
-  """
-  return _CalibratedLattice(
-      n_classes=2,
-      feature_columns=feature_columns,
-      model_dir=model_dir,
-      quantiles_dir=quantiles_dir,
-      keypoints_initializers_fn=keypoints_initializers_fn,
-      optimizer=optimizer,
-      config=config,
-      hparams=hparams,
-      head=head,
-      weight_column=weight_column)
-
-
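Both constructors accept the `keypoints_initializers_fn` contract described in the args above. A minimal sketch of such a closure; the keypoint values, and the assumption that the hparams configure `num_keypoints=5`, are made up for illustration:

```python
import tensorflow as tf

def my_keypoints_initializers_fn():
  # Called at graph-construction time; returns a pair of equal-length
  # tensors: calibrator input keypoints and their initial output values.
  # Their length must match num_keypoints in the hparams (5 here).
  kp_inputs = tf.constant([0.0, 0.25, 0.5, 0.75, 1.0], dtype=tf.float32)
  kp_outputs = tf.constant([0.0, 0.1, 0.4, 0.8, 1.0], dtype=tf.float32)
  return kp_inputs, kp_outputs

# Per-feature initialization is also accepted as a dict of closures, e.g.
# keypoints_initializers_fn={'x0': my_keypoints_initializers_fn}.
```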
-def calibrated_lattice_regressor(feature_columns=None,
-                                 model_dir=None,
-                                 quantiles_dir=None,
-                                 keypoints_initializers_fn=None,
-                                 optimizer=None,
-                                 config=None,
-                                 hparams=None,
-                                 head=None,
-                                 weight_column=None):
-  """Calibrated lattice estimator (model) for regression.
-
-  This model uses a piecewise linear calibration function on each of the
-  inputs (parametrized) and then combines the results in a lattice.
-  Optionally the calibration can be made monotonic.
-
-  It usually requires a preprocessing step on the data, to calculate the
-  quantiles of each used feature. This can be done locally, or in one worker
-  only, before training, in a separate invocation of your program (or
-  directly). Typically the result can be saved (`save_dir` parameter) to the
-  same directory where the data is.
-
-  Hyper-parameters are given in the form of the object
-  tfl_hparams.CalibratedLatticeHParams. It takes in per-feature calibration
-  parameters.
-
-  Internally values will be converted to tf.float32.
-
-  Example:
-
-  ```python
-  def input_fn_train(): ...
-  def input_fn_eval(): ...
-
-  my_feature_columns=[...]
-
-  # Have a separate program flag to generate the quantiles. Needs to be run
-  # only once.
-  if FLAGS.create_quantiles:
-    keypoints_initialization.save_quantiles_for_keypoints(
-        input_fn=input_fn_train,
-        feature_columns=my_feature_columns,
-        save_dir=FLAGS.data_dir,
-        num_quantiles=1000,
-        override=True)
-    return  # Exit program.
-
-  estimator = calibrated_lattice.calibrated_lattice_regressor(
-      feature_columns=feature_columns)
-  estimator.train(input_fn=input_fn_train)
-  estimator.evaluate(input_fn=input_fn_eval)
-  estimator.predict(input_fn=input_fn_predict)
-  ```
-
-  Args:
-    feature_columns: Optional, an iterable containing all the feature
-      columns used by the model. All items in the set should be instances of
-      classes derived from `FeatureColumn`. If not given, the model will
-      use as features the tensors returned by input_fn.
-      Supported types: RealValuedColumn.
-    model_dir: Directory to save model parameters, graphs, etc. This can
-      also be used to load checkpoints from the directory into an estimator
-      to continue training a previously saved model.
-    quantiles_dir: location where quantiles for the data were saved. Typically
-      the same directory as the training data. These quantiles can be
-      generated only once with
-      `keypoints_initialization.save_quantiles_for_keypoints` in a separate
-      invocation of your program. If you don't want to use quantiles, you can
-      set `keypoints_initializer` instead.
-    keypoints_initializers_fn: if you know the distribution of your
-      input features you can provide that directly instead of `quantiles_dir`.
-      See `keypoints_initialization.uniform_keypoints_for_signal`. It must be
-      a closure that, when called, returns a pair of tensors with keypoint
-      inputs and outputs to use for initialization. Alternatively it can be
-      given as a dict mapping feature name to keypoints_initializers_fn, so
-      one can have a different initialization per feature. It uses a closure
-      instead of the tensors themselves because the graph has to be created
-      at the time the model is built, which happens at a later time.
-    optimizer: string, `Optimizer` object, or callable that defines the
-      optimizer to use for training -- if a callable, it will be called with
-      learning_rate=hparams.learning_rate.
-    config: RunConfig object to configure the runtime settings. Typically set
-      to learn_runner.EstimatorConfig().
-    hparams: an instance of tfl_hparams.CalibratedLatticeHParams. If set to
-      None, default parameters are used.
-    head: a `TensorFlow Estimator Head` which specifies how the loss function,
-      final predictions, and so on are generated from model outputs. Defaults
-      to using a sigmoid cross entropy head for binary classification and a
-      mean squared error head for regression.
-    weight_column: A string or a `tf.feature_column.numeric_column` defining
-      the feature column representing weights. It is used to down-weight or
-      boost examples during training. It will be multiplied by the loss of
-      the example.
-
-  Returns:
-    A `CalibratedLatticeRegressor` estimator.
-
-  Raises:
-    ValueError: invalid parameters.
-    KeyError: type of feature not supported.
-  """
-  return _CalibratedLattice(
-      n_classes=0,
-      feature_columns=feature_columns,
-      model_dir=model_dir,
-      quantiles_dir=quantiles_dir,
-      keypoints_initializers_fn=keypoints_initializers_fn,
-      optimizer=optimizer,
-      config=config,
-      hparams=hparams,
-      head=head,
-      weight_column=weight_column)
diff --git a/tensorflow_lattice/python/estimators/calibrated_lattice_test.py b/tensorflow_lattice/python/estimators/calibrated_lattice_test.py
deleted file mode 100644
index e10a5df..0000000
--- a/tensorflow_lattice/python/estimators/calibrated_lattice_test.py
+++ /dev/null
@@ -1,406 +0,0 @@
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================== -"""CalibratedLattice provide canned estimators.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf - -from tensorflow_lattice.python.estimators import calibrated_lattice -from tensorflow_lattice.python.estimators import hparams as tfl_hparams -from tensorflow_lattice.python.lib import keypoints_initialization -from tensorflow_lattice.python.lib import test_data - -_NUM_KEYPOINTS = 50 - - -class CalibratedLatticeHParamsTest(tf.test.TestCase): - - def setUp(self): - super(CalibratedLatticeHParamsTest, self).setUp() - self.empty_estimator = calibrated_lattice.calibrated_lattice_classifier() - self.hparams = tfl_hparams.CalibratedLatticeHParams(feature_names=['x']) - self.hparams.set_param('lattice_size', 2) - self.hparams.set_param('calibrator_output_min', 0) - self.hparams.set_param('calibrator_output_max', 1) - self.hparams.set_param('calibration_bound', True) - - def testWrongLatticeSize(self): - self.hparams.set_feature_param('x', 'lattice_size', -1) - self.assertRaisesRegexp( - ValueError, - 'Hyperparameter configuration cannot be used in the calibrated lattice ' - 'estimator.', self.empty_estimator.check_hparams, self.hparams) - - def testWrongCalibrationOutputMin(self): - self.hparams.set_param('calibration_output_min', 0.0) - self.hparams.set_feature_param('x', 'calibration_output_min', -1.0) - self.assertRaisesRegexp( - ValueError, - 'calibration_output_min=-1 should not be set, it is adjusted ' - 'automatically to match the lattice_size', - self.empty_estimator.check_hparams, self.hparams) - - def testWrongCalibrationOutputMax(self): - self.hparams.set_param('calibration_output_max', 0.0) - self.hparams.set_feature_param('x', 'calibration_output_max', 10) - self.assertRaisesRegexp( - ValueError, - 'calibration_output_max=10 should not be set, it is adjusted ' - 'automatically to match the lattice_size', - self.empty_estimator.check_hparams, self.hparams) - - def testWrongCalibrationBound(self): - self.hparams.set_feature_param('x', 'calibration_bound', False) - self.assertRaisesRegexp( - ValueError, - 'Hyperparameter configuration cannot be used in the calibrated lattice ' - 'estimator.', self.empty_estimator.check_hparams, self.hparams) - - def testWrongLatticeRegularization(self): - self.hparams.set_feature_param('x', 'lattice_l1_reg', 0.1) - self.hparams.set_feature_param('x', 'lattice_l2_reg', 0.1) - self.hparams.set_feature_param('x', 'lattice_l1_torsion_reg', 0.1) - self.hparams.set_feature_param('x', 'lattice_l1_torsion_reg', 0.1) - self.assertRaisesRegexp( - ValueError, - 'Hyperparameter configuration cannot be used in the calibrated lattice ' - 'estimator.', self.empty_estimator.check_hparams, self.hparams) - - -class CalibratedLatticeTest(tf.test.TestCase): - - def setUp(self): - super(CalibratedLatticeTest, self).setUp() - self._test_data = test_data.TestData() - - def _CalibratedLatticeRegressor(self, - feature_names, - feature_columns, - weight_column=None, - **hparams_args): - - def init_fn(): - return keypoints_initialization.uniform_keypoints_for_signal( - _NUM_KEYPOINTS, -1., 1., 0., 1.) - - hparams = tfl_hparams.CalibratedLatticeHParams( - feature_names, num_keypoints=_NUM_KEYPOINTS, **hparams_args) - # Turn off monotonic calibrator. 
- hparams.set_param('calibration_monotonic', None) - hparams.set_param('learning_rate', 0.1) - return calibrated_lattice.calibrated_lattice_regressor( - feature_columns=feature_columns, - hparams=hparams, - weight_column=weight_column, - keypoints_initializers_fn=init_fn) - - def _CalibratedLatticeClassifier(self, feature_columns, **hparams_args): - - def init_fn(): - return keypoints_initialization.uniform_keypoints_for_signal( - _NUM_KEYPOINTS, -1., 1., 0., 1.) - - hparams = tfl_hparams.CalibratedLatticeHParams( - num_keypoints=_NUM_KEYPOINTS, **hparams_args) - # Turn off monotonic calibrator. - hparams.set_param('calibration_monotonic', None) - hparams.set_param('learning_rate', 0.1) - - return calibrated_lattice.calibrated_lattice_classifier( - feature_columns=feature_columns, - hparams=hparams, - keypoints_initializers_fn=init_fn) - - def testCalibratedLatticeRegressorTraining1D(self): - feature_columns = [ - tf.feature_column.numeric_column('x'), - ] - estimator = self._CalibratedLatticeRegressor(['x'], feature_columns) - estimator.train(input_fn=self._test_data.oned_input_fn()) - results = estimator.evaluate(input_fn=self._test_data.oned_input_fn()) - self.assertLess(results['average_loss'], 1e-3) - - def testCalibratedLatticeRegressorWeightedTraining1D(self): - feature_columns = [ - tf.feature_column.numeric_column('x'), - ] - weight_column = tf.feature_column.numeric_column('zero') - estimator = self._CalibratedLatticeRegressor(['x'], - feature_columns, - weight_column=weight_column) - estimator.train(input_fn=self._test_data.oned_zero_weight_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.oned_zero_weight_input_fn()) - self.assertLess(results['average_loss'], 1e-7) - - def testCalibratedLatticeRegressorTraining2D(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedLatticeRegressor(['x0', 'x1'], feature_columns) - estimator.train(input_fn=self._test_data.twod_input_fn()) - results = estimator.evaluate(input_fn=self._test_data.twod_input_fn()) - self.assertLess(results['average_loss'], 5e-3) - - def testCalibratedLatticeRegressorTraining2DWithCalibrationRegularizer(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedLatticeRegressor( - ['x0', 'x1'], - feature_columns, - interpolation_type='simplex', - calibration_l1_reg=1.0, - calibration_l2_reg=1.0, - calibration_l1_laplacian_reg=1.0, - calibration_l2_laplacian_reg=1.0) - estimator.train(input_fn=self._test_data.twod_input_fn()) - results = estimator.evaluate(input_fn=self._test_data.twod_input_fn()) - # We expect the loss is larger than the loss without regularization. 
- self.assertGreater(results['average_loss'], 1e-2) - self.assertLess(results['average_loss'], 0.1) - - def testCalibratedLatticeRegressorTraining2DWithLatticeRegularizer(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedLatticeRegressor(['x0', 'x1'], - feature_columns, - interpolation_type='simplex', - lattice_l1_reg=1.0, - lattice_l2_reg=1.0, - lattice_l1_torsion_reg=1.0, - lattice_l2_torsion_reg=1.0, - lattice_l1_laplacian_reg=1.0, - lattice_l2_laplacian_reg=1.0) - estimator.train(input_fn=self._test_data.twod_input_fn()) - results = estimator.evaluate(input_fn=self._test_data.twod_input_fn()) - # We expect the loss is larger than the loss without regularization. - self.assertGreater(results['average_loss'], 1e-2) - self.assertLess(results['average_loss'], 0.5) - - def testCalibratedLatticeRegressorTraining2DWithPerFeatureRegularizer(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedLatticeRegressor( - ['x0', 'x1'], - feature_columns, - feature__x0__lattice_l1_laplacian_reg=100.0, - feature__x1__lattice_l2_laplacian_reg=100.0) - estimator.train(input_fn=self._test_data.twod_input_fn()) - results = estimator.evaluate(input_fn=self._test_data.twod_input_fn()) - # We expect the loss is larger than the loss without regularization. - self.assertGreater(results['average_loss'], 0.1) - self.assertLess(results['average_loss'], 0.2) - - def testCalibratedLatticeRegressorTrainingMultiDimensionalFeature(self): - feature_columns = [ - tf.feature_column.numeric_column('x', shape=(2,)), - ] - estimator = self._CalibratedLatticeRegressor(['x'], - feature_columns, - interpolation_type='hypercube') - estimator.train(input_fn=self._test_data.multid_feature_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.multid_feature_input_fn()) - self.assertLess(results['average_loss'], 1e-3) - - # Turn-off calibration for feature 'x', it should turn if off for both - # dimensions, and the results should get much worse. - estimator = self._CalibratedLatticeRegressor(['x'], - feature_columns, - feature__x__num_keypoints=0) - estimator.train(input_fn=self._test_data.multid_feature_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.multid_feature_input_fn()) - self.assertGreater(results['average_loss'], 1e-2) - - def testCalibratedLatticeClassifierTraining(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedLatticeClassifier(feature_columns) - estimator.train(input_fn=self._test_data.twod_classificer_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.twod_classificer_input_fn()) - self.assertGreater(results['auc'], 0.990) - - def testCalibratedLatticeClassifierTrainingWithCalibrationRegularizer(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedLatticeClassifier( - feature_columns, - interpolation_type='hypercube', - calibration_l1_reg=0.3, - calibration_l2_reg=0.3, - calibration_l1_laplacian_reg=1.0, - calibration_l2_laplacian_reg=1.0) - estimator.train(input_fn=self._test_data.twod_classificer_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.twod_classificer_input_fn()) - # We expect AUC is worse than the model without regularization. 
- self.assertLess(results['auc'], 0.98) - - def testCalibratedLatticeClassifierTrainingWithLatticeRegularizer(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedLatticeClassifier( - feature_columns, - interpolation_type='simplex', - lattice_l1_reg=5.0, - lattice_l2_reg=5.0, - lattice_l1_torsion_reg=5.0, - lattice_l2_torsion_reg=5.0, - lattice_l1_laplacian_reg=5.0, - lattice_l2_laplacian_reg=5.0) - estimator.train(input_fn=self._test_data.twod_classificer_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.twod_classificer_input_fn()) - # We expect AUC is worse than the model without regularization. - self.assertLess(results['auc'], 0.98) - self.assertGreater(results['auc'], 0.68) - - def testCalibratedLatticeClassifierTrainingWithPerFeatureRegularizer(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedLatticeClassifier( - feature_columns, - feature_names=['x0', 'x1'], - feature__x0__lattice_l1_laplacian_reg=50.0, - feature__x1__lattice_l2_laplacian_reg=50.0) - estimator.train(input_fn=self._test_data.twod_classificer_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.twod_classificer_input_fn()) - # We expect AUC is worse than the model without regularization. - self.assertLess(results['auc'], 0.98) - self.assertGreater(results['auc'], 0.8) - - def testCalibratedLatticeMonotonicClassifierTraining(self): - # Construct the following training/testing pair. - # - # Training: (x, y) - # ([0., 0.], 0.0) - # ([0., 1.], 1.0) - # ([1., 0.], 1.0) - # ([1., 1.], 0.0) - # - # Test: (x, y) - # ([0., 0.], 0.0) - # ([0., 1.], 1.0) - # ([1., 0.], 1.0) - # ([1., 1.], 1.0) - # - # Note that training example has a noisy sample, ([1., 1.], 0.0), and test - # examples are generated by the logical-OR function. Therefore by enforcing - # increasing monotonicity to all features, we should be able to work well - # in the test examples. - x0 = np.array([0.0, 0.0, 1.0, 1.0]) - x1 = np.array([0.0, 1.0, 0.0, 1.0]) - x_samples = {'x0': x0, 'x1': x1} - training_y = np.array([[False], [True], [True], [False]]) - test_y = np.array([[False], [True], [True], [True]]) - - train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x=x_samples, y=training_y, batch_size=4, num_epochs=1000, shuffle=False) - test_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x=x_samples, y=test_y, shuffle=False) - - # Define monotonic lattice classifier. - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - - def init_fn(): - return keypoints_initialization.uniform_keypoints_for_signal( - 2, 0., 1., 0., 1.) - - hparams = tfl_hparams.CalibratedLatticeHParams(num_keypoints=2) - # Monotonic calibrated lattice. - - hparams.set_param('monotonicity', +1) - hparams.set_param('learning_rate', 0.1) - hparams.set_param('interpolation_type', 'hypercube') - - estimator = calibrated_lattice.calibrated_lattice_classifier( - feature_columns=feature_columns, - hparams=hparams, - keypoints_initializers_fn=init_fn) - - estimator.train(input_fn=train_input_fn) - results = estimator.evaluate(input_fn=test_input_fn) - # We should expect 1.0 accuracy. 
-    self.assertGreater(results['accuracy'], 0.999)
-
-  def testCalibratedLatticeWithMissingTraining(self):
-    # x0 is missing with its own vertex, so it can take very different values,
-    # while x1 is missing and calibrated, in this case to the middle of the
-    # lattice.
-    x0 = np.array([0., 0., 1., 1., -1., -1., 0., 1.])
-    x1 = np.array([0., 1., 0., 1., 0., 1., -1., -1.])
-    training_y = np.array([1., 3., 7., 11., 23., 27., 2., 9.])
-    x_samples = {'x0': x0, 'x1': x1}
-
-    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
-        x=x_samples,
-        y=training_y,
-        batch_size=x0.shape[0],
-        num_epochs=2000,
-        shuffle=False)
-    test_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
-        x=x_samples, y=training_y, shuffle=False)
-    feature_columns = [
-        tf.feature_column.numeric_column('x0'),
-        tf.feature_column.numeric_column('x1'),
-    ]
-
-    def init_fn():
-      return keypoints_initialization.uniform_keypoints_for_signal(
-          2, 0., 1., 0., 1.)
-
-    hparams = tfl_hparams.CalibratedLatticeHParams(['x0', 'x1'],
-                                                   num_keypoints=2,
-                                                   learning_rate=0.1,
-                                                   missing_input_value=-1.)
-    hparams.set_feature_param('x0', 'missing_vertex', True)
-
-    estimator = calibrated_lattice.calibrated_lattice_regressor(
-        feature_columns=feature_columns,
-        hparams=hparams,
-        keypoints_initializers_fn=init_fn)
-
-    estimator.train(input_fn=train_input_fn)
-    results = estimator.evaluate(input_fn=test_input_fn)
-    self.assertLess(results['average_loss'], 0.1)
-
-
-if __name__ == '__main__':
-  tf.test.main()
diff --git a/tensorflow_lattice/python/estimators/calibrated_linear.py b/tensorflow_lattice/python/estimators/calibrated_linear.py
deleted file mode 100644
index 9f3e167..0000000
--- a/tensorflow_lattice/python/estimators/calibrated_linear.py
+++ /dev/null
@@ -1,365 +0,0 @@
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""CalibratedLinear canned estimators."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import tensorflow as tf
-
-from tensorflow_lattice.python.estimators import calibrated as calibrated_lib
-from tensorflow_lattice.python.estimators import hparams as tfl_hparams
-
-# Scope for variable names.
-_SCOPE_BIAS_WEIGHT = 'bias_weight'
-
-
-class _CalibratedLinear(calibrated_lib.Calibrated):
-  """Base class for CalibratedLinearClassifier and CalibratedLinearRegressor."""
-
-  def __init__(self,
-               n_classes,
-               feature_columns=None,
-               model_dir=None,
-               quantiles_dir=None,
-               keypoints_initializers_fn=None,
-               optimizer=None,
-               config=None,
-               hparams=None,
-               head=None,
-               weight_column=None):
-    """Constructs a CalibratedLinearClassifier/Regressor."""
-    if not hparams:
-      hparams = tfl_hparams.CalibratedLinearHParams([])
-    self.check_hparams(hparams)
-
-    super(_CalibratedLinear,
-          self).__init__(n_classes, feature_columns, model_dir, quantiles_dir,
-                         keypoints_initializers_fn, optimizer, config, hparams,
-                         head, weight_column, 'linear')
-
-  def _check_param_configuration(self, num_keypoints, missing_input_value,
-                                 missing_output_value):
-    error_messages = []
-    if ((num_keypoints is None or num_keypoints < 2) and
-        missing_input_value is not None):
-      error_messages.append(
-          'num_keypoints not set (or too low) so the value is not calibrated, '
-          'and missing values cannot be handled')
-    if missing_output_value is not None:
-      error_messages.append('CalibratedLinear models do not support fixed '
-                            'output for missing values')
-    return error_messages
-
-  def check_hparams(self, hparams):
-    """Checks pre-conditions of hparams.
-
-    Args:
-      hparams: (tfl_hparams.CalibratedLinearHParams) Hyperparameters to be
-        examined.
-    Raises:
-      ValueError: If the hyperparameter configuration is invalid, for example
-        calibration_monotonic is None but lattice_monotonic is True; the
-        error is raised with a root cause.
-    """
-    error_messages = []
-
-    # Check global params.
-    feature_names = hparams.get_feature_names()
-    global_values, per_feature_values = hparams.get_global_and_feature_params(
-        ['num_keypoints', 'missing_input_value', 'missing_output_value'],
-        feature_names)
-    global_param_error_messages = self._check_param_configuration(
-        *global_values)
-    if global_param_error_messages:
-      error_messages.append('Error message for global param:')
-      error_messages += global_param_error_messages
-
-    # Check per-feature params. hparams.get_feature_names() will only return
-    # feature names that set per-feature parameters.
-    for feature_idx in range(len(per_feature_values)):
-      per_feature_param_error_messages = self._check_param_configuration(
-          *per_feature_values[feature_idx])
-      if per_feature_param_error_messages:
-        error_messages.append(
-            'Error message for %s feature param:' % feature_names[feature_idx])
-        error_messages += per_feature_param_error_messages
-
-    if error_messages:
-      raise ValueError(
-          'Hyperparameter configuration cannot be used in the calibrated '
-          'linear estimator. Error messages report the issue per feature, but'
-          ' the parameter may be inherited from global parameter.\nDetailed '
-          'error messages\n%s' % '\n'.join(error_messages))
-
-  def calibration_structure_builder(self, columns_to_tensors, hparams):
-    """Returns the calibration structure of the model. See base class."""
-    return None
-
-  def prediction_builder_from_calibrated(
-      self, mode, per_dimension_feature_names, hparams, calibrated):
-    # No need for linear weights: they are redundant, since the calibration
-    # can absorb them. The same could be said for the bias, but in practice a
-    # bias makes training easier in the presence of many features.
-    self.check_hparams(hparams)
-    prediction = tf.reduce_sum(calibrated, 1, keepdims=True)
-    bias = tf.compat.v1.get_variable(
-        _SCOPE_BIAS_WEIGHT,
-        initializer=tf.zeros(shape=[], dtype=self._dtype))
-    prediction += bias
-    # Returns the prediction Tensor, projection ops, and regularization ops.
-    return prediction, None, None
-
-
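Why the sum-plus-bias head suffices is worth spelling out. An illustrative numpy sketch (not the estimator's actual TF graph) of the prediction above, and of why per-feature linear weights would be redundant:

```python
import numpy as np

calibrated = np.array([[0.2, -0.1, 0.4],
                       [0.0, 0.3, -0.2]])  # [batch, num_features]
bias = 0.05                                # scalar; trained in practice

# The model output is just the sum of per-feature calibrator outputs
# plus a single trainable bias.
prediction = calibrated.sum(axis=1, keepdims=True) + bias
# prediction -> [[0.55], [0.15]]

# A per-feature weight w_i would compute w_i * c_i(x_i), but scaling the
# calibrator's output keypoints by w_i yields exactly the same function,
# so the calibration layer can absorb any such weight during training.
```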
-def calibrated_linear_classifier(feature_columns=None,
-                                 model_dir=None,
-                                 quantiles_dir=None,
-                                 keypoints_initializers_fn=None,
-                                 optimizer=None,
-                                 config=None,
-                                 hparams=None,
-                                 head=None,
-                                 weight_column=None):
-  """Calibrated linear binary classifier model.
-
-  This model uses a piecewise linear calibration function on each of the real
-  (as opposed to binary) inputs (parametrized) and then combines (sums up)
-  the results. Optionally the calibration can be made monotonic.
-
-  It usually requires a preprocessing step on the data, to calculate the
-  quantiles of each used feature. This can be done locally, or in one worker
-  only, before training, in a separate invocation of your program (or
-  directly). Typically the result can be saved (`save_dir` parameter) to the
-  same directory where the data is.
-
-  Hyper-parameters are given in the form of the object
-  tfl_hparams.CalibratedLinearHParams. It takes in per-feature calibration
-  parameters.
-
-  Internally values will be converted to tf.float32.
-
-  Example:
-
-  ```python
-  def input_fn_train(): ...
-  def input_fn_eval(): ...
-
-  my_feature_columns=[...]
-
-  # Have a separate program flag to generate the quantiles. Needs to be run
-  # only once.
-  if FLAGS.create_quantiles:
-    keypoints_initialization.save_quantiles_for_keypoints(
-        input_fn=input_fn_train,
-        feature_columns=my_feature_columns,
-        save_dir=FLAGS.data_dir,
-        num_quantiles=1000,
-        override=True)
-    return  # Exit program.
-
-  estimator = calibrated_linear.calibrated_linear_classifier(
-      feature_columns=feature_columns)
-  estimator.train(input_fn=input_fn_train)
-  estimator.evaluate(input_fn=input_fn_eval)
-  estimator.predict(input_fn=input_fn_predict)
-  ```
-
-  Args:
-    feature_columns: Optional, an iterable containing all the feature
-      columns used by the model. All items in the set should be instances of
-      classes derived from `FeatureColumn`. If not given, the model will
-      use as features the tensors returned by input_fn.
-      Supported types of columns: RealValuedColumn.
-    model_dir: Directory to save model parameters, graphs, etc. This can
-      also be used to load checkpoints from the directory into an estimator
-      to continue training a previously saved model.
-    quantiles_dir: location where quantiles for the data were saved. Typically
-      the same directory as the training data. These quantiles can be
-      generated only once with
-      `keypoints_initialization.save_quantiles_for_keypoints` in a separate
-      invocation of your program. If you don't want to use quantiles, you can
-      set `keypoints_initializer` instead.
-    keypoints_initializers_fn: if you know the distribution of your
-      input features you can provide that directly instead of `quantiles_dir`.
-      See `keypoints_initialization.uniform_keypoints_for_signal`. It must be
-      a closure that returns a pair of tensors with keypoint inputs and
-      outputs to use for initialization (must match `num_keypoints` configured
-      in `hparams`). Alternatively the closure can return a dict mapping
-      feature name to pairs for initialization per feature.
-      If `quantiles_dir` and `keypoints_initializers_fn` are set, the latter
-      takes precedence, and the features for which `keypoints_initializers`
-      are not defined fall back to using the quantiles found in
-      `quantiles_dir`. It uses a closure instead of the tensors themselves
-      because the graph has to be created at the time the model is built,
-      which happens at a later time.
-    optimizer: string, `Optimizer` object, or callable that defines the
-      optimizer to use for training -- if a callable, it will be called with
-      learning_rate=hparams.learning_rate.
-    config: RunConfig object to configure the runtime settings. Typically set
-      to learn_runner.EstimatorConfig().
-    hparams: an instance of tfl_hparams.CalibratedLinearHParams. If set to
-      None, default parameters are used.
-    head: a `TensorFlow Estimator Head` which specifies how the loss function,
-      final predictions, and so on are generated from model outputs. Defaults
-      to using a sigmoid cross entropy head for binary classification and a
-      mean squared error head for regression.
-    weight_column: A string or a `tf.feature_column.numeric_column` defining
-      the feature column representing weights. It is used to down-weight or
-      boost examples during training. It will be multiplied by the loss of
-      the example.
-
-  Returns:
-    A `CalibratedLinearClassifier` estimator.
-
-  Raises:
-    ValueError: invalid parameters.
-    KeyError: type of feature not supported.
-  """
-  return _CalibratedLinear(
-      n_classes=2,
-      feature_columns=feature_columns,
-      model_dir=model_dir,
-      quantiles_dir=quantiles_dir,
-      keypoints_initializers_fn=keypoints_initializers_fn,
-      optimizer=optimizer,
-      config=config,
-      hparams=hparams,
-      head=head,
-      weight_column=weight_column)
-
-
-def calibrated_linear_regressor(feature_columns=None,
-                                model_dir=None,
-                                quantiles_dir=None,
-                                keypoints_initializers_fn=None,
-                                optimizer=None,
-                                config=None,
-                                hparams=None,
-                                head=None,
-                                weight_column=None):
-  """Calibrated linear estimator (model) for regression.
-
-  This model uses a piecewise linear calibration function on each of the
-  inputs (parametrized) and then combines (sums up) the results. Optionally
-  the calibration can be made monotonic.
-
-  It usually requires a preprocessing step on the data, to calculate the
-  quantiles of each used feature. This can be done locally, or in one worker
-  only, before training, in a separate invocation of your program (or
-  directly). Typically the result can be saved (`save_dir` parameter) to the
-  same directory where the data is.
-
-  Hyper-parameters are given in the form of the object
-  tfl_hparams.CalibratedLinearHParams. It takes in per-feature calibration
-  parameters.
-
-  Internally values will be converted to tf.float32.
-
-  Example:
-
-  ```python
-  def input_fn_train(): ...
-  def input_fn_eval(): ...
-
-  my_feature_columns=[...]
-
-  # Have a separate program flag to generate the quantiles. Needs to be run
-  # only once.
-  if FLAGS.create_quantiles:
-    keypoints_initialization.save_quantiles_for_keypoints(
-        input_fn=input_fn_train,
-        feature_columns=my_feature_columns,
-        save_dir=FLAGS.data_dir,
-        num_quantiles=1000,
-        override=True)
-    return  # Exit program.
-
-  estimator = calibrated_linear.calibrated_linear_regressor(
-      feature_columns=feature_columns)
-  estimator.train(input_fn=input_fn_train)
-  estimator.evaluate(input_fn=input_fn_eval)
-  estimator.predict(input_fn=input_fn_predict)
-  ```
-
-  Args:
-    feature_columns: Optional, an iterable containing all the feature
-      columns used by the model. All items in the set should be instances of
-      classes derived from `FeatureColumn`. If not given, the model will
-      use as features the tensors returned by input_fn.
-      Supported types: RealValuedColumn.
-    model_dir: Directory to save model parameters, graphs, etc. This can
-      also be used to load checkpoints from the directory into an estimator
-      to continue training a previously saved model.
-    quantiles_dir: location where quantiles for the data were saved. Typically
-      the same directory as the training data. These quantiles can be
-      generated only once with
-      `keypoints_initialization.save_quantiles_for_keypoints` in a separate
-      invocation of your program. If you don't want to use quantiles, you can
-      set `keypoints_initializer` instead.
-    keypoints_initializers_fn: if you know the distribution of your
-      input features you can provide that directly instead of `quantiles_dir`.
-      See `keypoints_initialization.uniform_keypoints_for_signal`. It must be
-      a closure that returns a pair of tensors with keypoint inputs and
-      outputs to use for initialization (must match `num_keypoints` configured
-      in `hparams`). Alternatively the closure can return a dict mapping
-      feature name to pairs for initialization per feature. If `quantiles_dir`
-      and `keypoints_initializers_fn` are set, the latter takes precedence,
-      and the features for which `keypoints_initializers` are not defined
-      fall back to using the quantiles found in `quantiles_dir`. It uses a
-      closure instead of the tensors themselves because the graph has to be
-      created at the time the model is built, which happens at a later time.
-    optimizer: string, `Optimizer` object, or callable that defines the
-      optimizer to use for training -- if a callable, it will be called with
-      learning_rate=hparams.learning_rate.
-    config: RunConfig object to configure the runtime settings. Typically set
-      to learn_runner.EstimatorConfig().
-    hparams: an instance of tfl_hparams.CalibratedLinearHParams. If set to
-      None, default parameters are used.
-    head: a `TensorFlow Estimator Head` which specifies how the loss function,
-      final predictions, and so on are generated from model outputs. Defaults
-      to using a sigmoid cross entropy head for binary classification and a
-      mean squared error head for regression.
-    weight_column: A string or a `tf.feature_column.numeric_column` defining
-      the feature column representing weights. It is used to down-weight or
-      boost examples during training. It will be multiplied by the loss of
-      the example.
-
-  Returns:
-    A `CalibratedLinearRegressor` estimator.
-
-  Raises:
-    ValueError: invalid parameters.
-    KeyError: type of feature not supported.
-  """
-  return _CalibratedLinear(
-      n_classes=0,
-      feature_columns=feature_columns,
-      model_dir=model_dir,
-      quantiles_dir=quantiles_dir,
-      keypoints_initializers_fn=keypoints_initializers_fn,
-      optimizer=optimizer,
-      config=config,
-      hparams=hparams,
-      head=head,
-      weight_column=weight_column)
diff --git a/tensorflow_lattice/python/estimators/calibrated_linear_test.py b/tensorflow_lattice/python/estimators/calibrated_linear_test.py
deleted file mode 100644
index 5406b96..0000000
--- a/tensorflow_lattice/python/estimators/calibrated_linear_test.py
+++ /dev/null
@@ -1,249 +0,0 @@
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""CalibratedLinear provides canned estimators.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -from tensorflow_lattice.python.estimators import calibrated_linear -from tensorflow_lattice.python.estimators import hparams as tfl_hparams -from tensorflow_lattice.python.lib import keypoints_initialization -from tensorflow_lattice.python.lib import test_data - -_NUM_KEYPOINTS = 50 - - -class CalibratedLinearTest(tf.test.TestCase): - - def setUp(self): - super(CalibratedLinearTest, self).setUp() - self._test_data = test_data.TestData() - - def _LinearRegressor(self, feature_columns): - # Can be used for baseline. - return tf.estimator.LinearRegressor(feature_columns=feature_columns) - - def _CalibratedLinearRegressor(self, - feature_names, - feature_columns, - weight_column=None, - **hparams_args): - - def init_fn(): - return keypoints_initialization.uniform_keypoints_for_signal( - _NUM_KEYPOINTS, -1., 1., -1., 1.) - - hparams = tfl_hparams.CalibratedLinearHParams( - feature_names, num_keypoints=_NUM_KEYPOINTS, **hparams_args) - return calibrated_linear.calibrated_linear_regressor( - feature_columns=feature_columns, - weight_column=weight_column, - hparams=hparams, - keypoints_initializers_fn=init_fn) - - def _CalibratedLinearRegressorWithQuantiles(self, feature_names, - feature_columns, **hparams_args): - """Model that saves/retrieves quantiles.""" - - # Quantiles to be used for x2 - quantiles_dir = self.get_temp_dir() - keypoints_initialization.save_quantiles_for_keypoints( - input_fn=self._test_data.threed_input_fn(True), - save_dir=quantiles_dir, - feature_columns=feature_columns, - num_steps=1) - - # Keypoint initialization function for x0 and x1 - def init_fn(): - return keypoints_initialization.uniform_keypoints_for_signal( - _NUM_KEYPOINTS, -1., 1., -1., 1.) - - hparams = tfl_hparams.CalibratedLinearHParams( - feature_names, num_keypoints=_NUM_KEYPOINTS, **hparams_args) - return calibrated_linear.calibrated_linear_regressor( - feature_columns=feature_columns, - hparams=hparams, - keypoints_initializers_fn={ - 'x0': init_fn, - 'x1': init_fn - }, - quantiles_dir=quantiles_dir # Used for 'x2' - ) - - def _LinearClassifier(self, feature_columns): - # Can be used for baseline. - return tf.estimator.LinearClassifier( - n_classes=2, feature_columns=feature_columns) - - def _CalibratedLinearClassifier(self, feature_names, feature_columns, - **hparams_args): - - def init_fn(): - return keypoints_initialization.uniform_keypoints_for_signal( - _NUM_KEYPOINTS, -1., 1., -1., 1.) 
- - hparams = tfl_hparams.CalibratedLinearHParams( - feature_names, num_keypoints=_NUM_KEYPOINTS, **hparams_args) - return calibrated_linear.calibrated_linear_classifier( - feature_columns=feature_columns, - hparams=hparams, - keypoints_initializers_fn=init_fn) - - def testCalibratedLinearRegressorTraining1D(self): - feature_columns = [ - tf.feature_column.numeric_column('x'), - ] - estimator = self._CalibratedLinearRegressor(['x'], feature_columns) - estimator.train(input_fn=self._test_data.oned_input_fn()) - results = estimator.evaluate(input_fn=self._test_data.oned_input_fn()) - # For the record: - # Loss(CalibratedLinear)=~2.5e-5 - # Loss(LinearRegressor)=~2.5e-2 - self.assertLess(results['average_loss'], 1e-4) - - def testCalibratedLinearRegressorWeightedTraining1D(self): - feature_columns = [tf.feature_column.numeric_column('x')] - weight_column = tf.feature_column.numeric_column('zero') - estimator = self._CalibratedLinearRegressor(['x'], - feature_columns, - weight_column=weight_column) - estimator.train(input_fn=self._test_data.oned_zero_weight_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.oned_zero_weight_input_fn()) - # Expects almost zero since the weight values are exactly zero. - self.assertLess(results['average_loss'], 1e-7) - - def testCalibratedLinearMonotonicRegressorTraining1D(self): - feature_columns = [ - tf.feature_column.numeric_column('x'), - ] - estimator = self._CalibratedLinearRegressor( - ['x'], - feature_columns, - feature__x__monotonicity=+1, - feature__x__missing_input_value=-1.0) - estimator.train(input_fn=self._test_data.oned_input_fn()) - _ = estimator.evaluate(input_fn=self._test_data.oned_input_fn()) - - def testCalibratedLinearRegressorTraining1DWithCalibrationRegularizer(self): - feature_columns = [ - tf.feature_column.numeric_column('x'), - ] - estimator = self._CalibratedLinearRegressor( - ['x'], - feature_columns, - calibration_l1_reg=0.001, - calibration_l2_reg=0.001, - calibration_l1_laplacian_reg=0.001, - calibration_l2_laplacian_reg=0.001) - estimator.train(input_fn=self._test_data.oned_input_fn()) - results = estimator.evaluate(input_fn=self._test_data.oned_input_fn()) - self.assertLess(results['average_loss'], 1e-2) - - def testCalibratedLinearRegressorTraining2D(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedLinearRegressor(['x0', 'x1'], feature_columns) - estimator.train(input_fn=self._test_data.twod_input_fn()) - results = estimator.evaluate(input_fn=self._test_data.twod_input_fn()) - # For the record: - # Loss(CalibratedLinear)=~6.9e-5 - # Loss(LinearRegressor)=~3.3e-2 - self.assertLess(results['average_loss'], 1e-4) - - def testCalibratedLinearRegressorTraining3D(self): - # Tests also categorical features that has a limited number - # of valid values. 
- feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - tf.feature_column.categorical_column_with_vocabulary_list( - 'x2', ['Y', 'N']) - ] - with tf.Graph().as_default(): - estimator = self._CalibratedLinearRegressorWithQuantiles( - ['x0', 'x1', 'x2'], feature_columns) - estimator.train(input_fn=self._test_data.threed_input_fn(False, 4)) - results = estimator.evaluate( - input_fn=self._test_data.threed_input_fn(False, 1)) - # For the record: - # average_loss(CalibratedLinear, 4 epochs)=~1e-5 - # average_loss(LinearRegressor, 100 epochs)=~0.159 - self.assertLess(results['average_loss'], 1e-4) - - def testCalibratedLinearRegressorTrainingMultiDimensionalFeature(self): - feature_columns = [ - tf.feature_column.numeric_column('x', shape=(2,)), - ] - - # With calibration. - estimator = self._CalibratedLinearRegressor(['x'], feature_columns) - estimator.train(input_fn=self._test_data.multid_feature_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.multid_feature_input_fn()) - # For the record: - # Loss(CalibratedLinear)=~6.6e-5 - # Loss(LinearRegressor)=~3.2e-2 - self.assertLess(results['average_loss'], 1e-4) - - # Turn-off calibration for feature 'x', it should turn if off for both - # dimensions. - estimator = self._CalibratedLinearRegressor(['x'], - feature_columns, - feature__x__num_keypoints=0) - estimator.train(input_fn=self._test_data.multid_feature_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.multid_feature_input_fn()) - self.assertGreater(results['average_loss'], 1e-2) - - def testCalibratedLinearClassifierTraining(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedLinearClassifier(['x0', 'x1'], feature_columns) - estimator.train(input_fn=self._test_data.twod_classificer_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.twod_classificer_input_fn()) - # For the record: - # auc(CalibratedLinear)=~0.999 - # auc(LinearClassifier)=~0.481 - self.assertGreater(results['auc'], 0.990) - - def testCalibratedLinearClassifierTrainingWithCalibrationRegularizer(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedLinearClassifier( - ['x0', 'x1'], - feature_columns, - calibration_l1_reg=0.001, - calibration_l2_reg=0.03, - calibration_l1_laplacian_reg=0.03, - calibration_l2_laplacian_reg=0.05) - estimator.train(input_fn=self._test_data.twod_classificer_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.twod_classificer_input_fn()) - self.assertGreater(results['auc'], 0.980) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_lattice/python/estimators/calibrated_rtl.py b/tensorflow_lattice/python/estimators/calibrated_rtl.py deleted file mode 100644 index a36d6a1..0000000 --- a/tensorflow_lattice/python/estimators/calibrated_rtl.py +++ /dev/null @@ -1,562 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""CalibratedRtl canned estimators."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import copy
-import os
-import random
-
-# Dependency imports
-import six
-import tensorflow as tf
-
-from tensorflow_lattice.python.estimators import calibrated as calibrated_lib
-from tensorflow_lattice.python.estimators import hparams as tfl_hparams
-from tensorflow_lattice.python.lib import lattice_layers
-from tensorflow_lattice.python.lib import regularizers
-from tensorflow.python.lib.io import file_io  # pylint: disable=g-direct-tensorflow-import
-
-_EPSILON = 1e-7
-
-_RTL_STRUCTURE_FILE = 'rtl_structure.csv'
-
-
-class _CalibratedRtl(calibrated_lib.Calibrated):
-  """Base class for CalibratedRtl{Classifier|Regressor}."""
-
-  def __init__(self,
-               n_classes,
-               feature_columns=None,
-               model_dir=None,
-               quantiles_dir=None,
-               keypoints_initializers_fn=None,
-               lattice_initializers_fn=None,
-               optimizer=None,
-               config=None,
-               hparams=None,
-               head=None,
-               weight_column=None):
-    """Constructs a CalibratedRtlClassifier/Regressor."""
-    if not hparams:
-      hparams = tfl_hparams.CalibratedRtlHParams([])
-    self.check_hparams(hparams)
-    hparams = self._adjust_calibration_params(hparams)
-
-    self.lattice_initializers_fn_ = lattice_initializers_fn
-
-    super(_CalibratedRtl,
-          self).__init__(n_classes, feature_columns, model_dir, quantiles_dir,
-                         keypoints_initializers_fn, optimizer, config, hparams,
-                         head, weight_column, 'rtl')
-    self._structure_file = os.path.join(self._model_dir, _RTL_STRUCTURE_FILE)
-
-  def _check_per_feature_param_configuration(
-      self, adjusted, monotonicity, lattice_size, calibration_output_min,
-      calibration_output_max, calibration_bound, missing_input_value,
-      missing_vertex):
-    """Checks the parameter configuration and returns error messages."""
-    error_messages = []
-    if monotonicity not in {-1, 0, +1}:
-      error_messages.append('monotonicity should be an integer {-1, 0, +1} '
-                            'but is %s' % monotonicity)
-
-    if lattice_size < 2:
-      error_messages.append('lattice_size should be greater than or equal '
-                            'to 2 but is %d' % lattice_size)
-
-    if not calibration_bound:
-      error_messages.append(
-          'A lattice expects a bounded input from a calibration layer, but '
-          'calibration_bound is set to be False')
-
-    if not adjusted:
-      if calibration_output_min is not None:
-        error_messages.append(
-            'calibration_output_min=%d should not be set, it is adjusted '
-            'automatically to match the lattice_size' % calibration_output_min)
-      if calibration_output_max is not None:
-        error_messages.append(
-            'calibration_output_max=%d should not be set, it is adjusted '
-            'automatically to match the lattice_size' % calibration_output_max)
-
-    if missing_input_value is None and missing_vertex:
-      error_messages.append(
-          'missing_vertex is True, however missing_input_value is not set')
-
-    return error_messages
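The checks above can flag a value "per feature" even when it was never set on that feature, because feature-level lookups inherit from the global parameter. An illustrative pure-Python mock (not the real `tfl_hparams` API) of that inheritance rule:

```python
class TinyHParams:
  """Toy stand-in for the global/per-feature lookup used by the checks."""

  def __init__(self, global_params):
    self._global = dict(global_params)  # e.g. {'lattice_size': 2}
    self._per_feature = {}              # e.g. {'x0': {'lattice_size': 3}}

  def set_feature_param(self, feature, name, value):
    self._per_feature.setdefault(feature, {})[name] = value

  def get_feature_param(self, feature, name):
    # A feature-level setting wins; otherwise the global value is inherited.
    return self._per_feature.get(feature, {}).get(name, self._global.get(name))

hp = TinyHParams({'lattice_size': 2, 'monotonicity': 0})
hp.set_feature_param('x0', 'lattice_size', 3)
assert hp.get_feature_param('x0', 'lattice_size') == 3  # overridden
assert hp.get_feature_param('x1', 'lattice_size') == 2  # inherited globally
```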
-  def _check_not_allowed_feature_params(self, hparams):
-    """Checks whether hparams sets feature-level values that are not allowed.
-
-    Certain values cannot be feature-level hyperparameters. This function
-    checks whether any feature sets a hyperparameter that is not allowed to
-    be feature-level, and returns non-empty error messages if it does.
-
-    Args:
-      hparams: (CalibratedRtlHParams) hyperparameters to be checked.
-
-    Returns:
-      error_messages: (list of strings) error messages.
-    """
-    not_allowed_feature_params = map(
-        'lattice_{}'.format,
-        regularizers.LATTICE_MULTI_DIMENSIONAL_REGULARIZERS)
-    error_messages = []
-    for param in not_allowed_feature_params:
-      for feature_name in hparams.get_feature_names():
-        if hparams.is_feature_set_param(feature_name, param):
-          error_messages.append('feature %s sets %s, which is not allowed.' %
-                                (feature_name, param))
-    return error_messages
-
-  def check_hparams(self, hparams, adjusted=False):
-    """Checks pre-conditions of hparams.
-
-    Args:
-      hparams: (tfl_hparams.CalibratedRtlHParams) Hyperparameters to be
-        examined.
-      adjusted: whether these parameters have already been adjusted. For
-        example, calibration_output_min and max are adjusted so that the
-        output is in [0, lattice_size - 1] (or [0, lattice_size] if
-        missing_vertex == True) and calibration_bound is set to True, etc.
-        If adjusted is True, we check that all parameter values are valid;
-        otherwise some checks are skipped.
-    Raises:
-      ValueError: If the hyperparameter configuration is invalid, for example
-        calibration_monotonic is None but lattice_monotonic is True; the
-        error is raised with a root cause.
-    """
-    error_messages = self._check_not_allowed_feature_params(hparams)
-
-    # Check lattice_rank and num_lattices.
-    lattice_rank = hparams.get_param('lattice_rank')
-    num_lattices = hparams.get_param('num_lattices')
-    if lattice_rank is None or num_lattices is None:
-      error_messages.append('lattice_rank and num_lattices should be provided')
-
-    # Check global params.
-    feature_names = hparams.get_feature_names()
-    packed_feature_values = hparams.get_global_and_feature_params([
-        'monotonicity', 'lattice_size', 'calibration_output_min',
-        'calibration_output_max', 'calibration_bound', 'missing_input_value',
-        'missing_vertex'
-    ], feature_names)
-    default_feature_values, per_feature_values = packed_feature_values
-    param_error_messages = self._check_per_feature_param_configuration(
-        adjusted, *default_feature_values)
-    if param_error_messages:
-      error_messages.append('Error message for default feature param:')
-      error_messages += param_error_messages
-
-    # Check per-feature params. hparams.get_feature_names() will only return
-    # feature names that set per-feature parameters.
-    for feature_idx in range(len(per_feature_values)):
-      param_error_messages = self._check_per_feature_param_configuration(
-          adjusted, *per_feature_values[feature_idx])
-      if param_error_messages:
-        error_messages.append(
-            'Error message for %s feature param:' % feature_names[feature_idx])
-        error_messages += param_error_messages
-
-    if error_messages:
-      raise ValueError(
-          'Hyperparameter configuration cannot be used in the calibrated '
-          'rtl estimator. Error messages report the issue per feature, but'
-          ' the parameter may be inherited from global parameter.\nDetailed '
-          'error messages\n%s' % '\n'.join(error_messages))
-
-  def _adjust_calibration_params(self, hparams):
-    """Adjusts the calibration parameters to match the lattice input sizes."""
-    hparams = copy.deepcopy(hparams)
-    feature_names = hparams.get_feature_names()
-    packed_feature_values = hparams.get_global_and_feature_params(
-        ['lattice_size', 'missing_input_value', 'missing_vertex'],
-        feature_names)
-    default_feature_values, per_feature_values = packed_feature_values
-    final_lattice_size, missing_output_value = self._calibration_params(
-        *default_feature_values)
-    lattice_size = default_feature_values[0]
-    hparams.set_param('calibration_output_min', 0)
-    hparams.set_param('calibration_output_max', lattice_size - 1)
-    hparams.set_param('final_lattice_size', final_lattice_size)
-    hparams.set_param('missing_output_value', missing_output_value)
-
-    if len(per_feature_values) != len(feature_names):
-      raise ValueError(
-          'length of per_feature_value (%d) != length of feature_names (%d)' %
-          (len(per_feature_values), len(feature_names)))
-    for (per_feature_value, feature_name) in zip(per_feature_values,
-                                                 feature_names):
-      final_lattice_size, missing_output_value = self._calibration_params(
-          *per_feature_value)
-      lattice_size = per_feature_value[0]
-      hparams.set_feature_param(feature_name, 'calibration_output_min', 0)
-      hparams.set_feature_param(feature_name, 'calibration_output_max',
-                                lattice_size - 1)
-      hparams.set_feature_param(feature_name, 'final_lattice_size',
-                                final_lattice_size)
-      hparams.set_feature_param(feature_name, 'missing_output_value',
-                                missing_output_value)
-    return hparams
-
-  def _calibration_params(self, lattice_size, missing_input_value,
-                          missing_vertex):
-    """Returns final_lattice_size and missing_output_value."""
-    if missing_input_value is None or not missing_vertex:
-      return lattice_size, None
-
-    # The last vertex of the lattice is reserved for missing values.
-    return lattice_size + 1, lattice_size
-
-  def _load_structure(self):
-    """Loads the rtl structure from model_dir."""
-    if not file_io.file_exists(self._structure_file):
-      raise ValueError(
-          'Structure file does not exist in %s!' % self._structure_file)
-    structure_csv_string = file_io.read_file_to_string(self._structure_file)
-    structure_csvs = structure_csv_string.split('\n')
-    structure = []
-    for structure_csv in structure_csvs:
-      structure.append([int(idx) for idx in structure_csv.split(',')])
-    return structure
-
-  def _save_structure(self, structure):
-    """Saves the rtl structure to model_dir."""
-    structure_csvs = []
-    for lattice in structure:
-      structure_csvs.append(','.join([str(idx) for idx in lattice]))
-    structure_csv_string = '\n'.join(structure_csvs)
-    file_io.write_string_to_file(self._structure_file, structure_csv_string)
-
-  def _create_structure(self, input_dim, num_lattices, lattice_rank, rtl_seed):
-    """Creates a random rtl structure, to be saved to model_dir."""
-    rtl_random = random.Random(rtl_seed)
-    structure = []
-    for _ in range(num_lattices):
-      structure.append(
-          rtl_random.sample(six.moves.xrange(input_dim), lattice_rank))
-    return structure
-
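A self-contained sketch of the structure bookkeeping above: each of the `num_lattices` lattices is a random sample of `lattice_rank` input indices, serialized as one comma-separated line per lattice (plain-Python stand-ins for the `file_io` helpers):

```python
import random

def create_structure(input_dim, num_lattices, lattice_rank, seed):
  rng = random.Random(seed)  # A fixed seed keeps the ensemble reproducible.
  return [rng.sample(range(input_dim), lattice_rank)
          for _ in range(num_lattices)]

def to_csv(structure):
  return '\n'.join(','.join(str(idx) for idx in lattice)
                   for lattice in structure)

def from_csv(csv_string):
  return [[int(idx) for idx in line.split(',')]
          for line in csv_string.split('\n')]

structure = create_structure(input_dim=5, num_lattices=3, lattice_rank=2,
                             seed=42)
assert from_csv(to_csv(structure)) == structure  # Round-trips losslessly.
```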
-  def calibration_structure_builder(self, columns_to_tensors, hparams):
-    """Returns the calibration structure of the model. See base class."""
-    return None
-
-  def prediction_builder_from_calibrated(
-      self, mode, per_dimension_feature_names, hparams, calibrated):
-    """Constructs the prediction."""
-    self.check_hparams(hparams, adjusted=True)
-    lattice_sizes = [
-        hparams.get_feature_param(f, 'final_lattice_size')
-        for f in per_dimension_feature_names
-    ]
-    lattice_monotonic = [(hparams.get_feature_param(f, 'monotonicity') != 0)
-                         for f in per_dimension_feature_names]
-    num_lattices = hparams.get_param('num_lattices')
-    lattice_rank = hparams.get_param('lattice_rank')
-    rtl_seed = hparams.get_param('rtl_seed')
-    interpolation_type = hparams.get_param('interpolation_type')
-    # Create and save the structure if it does not exist yet.
-    if not file_io.file_exists(self._structure_file):
-      structure = self._create_structure(
-          len(lattice_sizes), num_lattices, lattice_rank, rtl_seed)
-      self._save_structure(structure)
-    structure = self._load_structure()
-    # Check that the structure is what we expect.
-    if len(structure) != num_lattices:
-      raise ValueError(
-          'Expected %d lattices, but found %d lattices in '
-          'structure: %s' % (num_lattices, len(structure), str(structure)))
-    for each_lattice in structure:
-      if len(each_lattice) != lattice_rank:
-        raise ValueError(
-            'Expected lattice rank %d, but found %d in structure: %s'
-            % (lattice_rank, len(each_lattice), str(structure)))
-
-    # Set up the regularization.
-    regularizer_amounts = {}
-    for reg_name in regularizers.LATTICE_MULTI_DIMENSIONAL_REGULARIZERS:
-      regularizer_amounts[reg_name] = hparams.get_param(
-          'lattice_{}'.format(reg_name))
-    for reg_name in regularizers.LATTICE_ONE_DIMENSIONAL_REGULARIZERS:
-      regularizer_amounts[reg_name] = [
-          hparams.get_feature_param(feature_name,
-                                    'lattice_{}'.format(reg_name))
-          for feature_name in per_dimension_feature_names
-      ]
-
-    packed_results = lattice_layers.ensemble_lattices_layer(
-        calibrated,
-        lattice_sizes,
-        structure,
-        is_monotone=lattice_monotonic,
-        interpolation_type=interpolation_type,
-        lattice_initializers=self.lattice_initializers_fn_,
-        **regularizer_amounts)
-    (output_tensors, _, projection_ops, regularization) = packed_results
-    # Take the average of the output_tensors and add a bias.
-    output_tensor = tf.stack(
-        output_tensors, axis=0, name='stacked_output')
-    ensemble_output = tf.reduce_mean(output_tensor, axis=0)
-    ensemble_bias_init = hparams.get_param('ensemble_bias')
-    b = tf.Variable([ensemble_bias_init], name='ensemble_bias')
-    prediction = ensemble_output + b
-
-    # Returns the prediction Tensor, projection ops, and regularization.
-    return prediction, projection_ops, regularization
-
-
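An illustrative numpy sketch (not the actual TF graph) of the ensemble head above: the per-lattice outputs are stacked, averaged, and shifted by a trainable bias:

```python
import numpy as np

output_tensors = [np.array([[0.1], [0.4]]),   # lattice 1: [batch, 1]
                  np.array([[0.3], [0.2]]),   # lattice 2
                  np.array([[0.2], [0.6]])]   # lattice 3
ensemble_bias = 0.1  # initialized from the 'ensemble_bias' hparam

stacked = np.stack(output_tensors, axis=0)    # [num_lattices, batch, 1]
prediction = stacked.mean(axis=0) + ensemble_bias
# prediction -> [[0.3], [0.5]]: the mean lattice output plus the bias.
```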
-def calibrated_rtl_classifier(feature_columns=None,
-                              model_dir=None,
-                              quantiles_dir=None,
-                              keypoints_initializers_fn=None,
-                              optimizer=None,
-                              config=None,
-                              hparams=None,
-                              head=None,
-                              weight_column=None):
-  """Calibrated rtl binary classifier model.
-
-  This model uses a piecewise linear calibration function on each of the
-  inputs (parametrized) and then feeds them to an ensemble of random lattices.
-  num_lattices and lattice_rank (number of inputs to each lattice) must be
-  specified in the hyperparameters. Optionally the calibration can be made
-  monotonic.
-
-  It usually requires a preprocessing step on the data to calculate the
-  quantiles of each feature used. This can be done locally, or in one worker
-  only, before training, in a separate invocation of your program (or
-  directly). Typically the quantiles are saved (`save_dir` parameter) to the
-  same directory where the data is.
-
-  Hyper-parameters are given in the form of the object
-  tfl_hparams.CalibratedRtlHParams. lattice_rank and num_lattices must be
-  specified; they have no default values. It also takes in per-feature
-  parameters.
-
-  Internally values will be converted to tf.float32.
-
-  Example:
-
-  ```python
-  def input_fn_train(): ...
-  def input_fn_eval(): ...
-
-  my_feature_columns=[...]
-
-  # Have a separate program flag to generate the quantiles. Needs to be run
-  # only once.
-  if FLAGS.create_quantiles:
-    pwl_calibration_layers.calculate_quantiles_for_keypoints(
-      input_fn=input_fn_train,
-      feature_columns=my_feature_columns,
-      save_dir=FLAGS.data_dir,
-      num_quantiles=1000,
-      override=True)
-    return  # Exit program.
-
-  hparams = tfl_hparams.CalibratedRtlHParams(num_lattices=10, lattice_rank=2)
-  estimator = calibrated_rtl.calibrated_rtl_classifier(
-    feature_columns=my_feature_columns, hparams=hparams)
-  estimator.train(input_fn=input_fn_train)
-  estimator.evaluate(input_fn=input_fn_eval)
-  estimator.predict(input_fn=input_fn_predict)
-  ```
-
-  Args:
-    feature_columns: Optional, an iterable containing all the feature
-      columns used by the model. All items in the set should be instances of
-      classes derived from `FeatureColumn`. If not given, the model will
-      use as features the tensors returned by input_fn.
-      Supported types of columns: RealValuedColumn.
-    model_dir: Directory to save model parameters, graphs, etc. This can
-      also be used to load checkpoints from the directory into an estimator to
-      continue training a previously saved model.
-    quantiles_dir: location where quantiles for the data were saved. Typically
-      the same directory as the training data. These quantiles need to be
-      generated only once with
-      `pwl_calibration_layers.calculate_quantiles_for_keypoints` in a separate
-      invocation of your program. If you don't want to use quantiles, you can
-      set `keypoints_initializers_fn` instead.
-    keypoints_initializers_fn: if you know the distribution of your
-      input features you can provide that directly instead of `quantiles_dir`.
-      See `pwl_calibration_layers.uniform_keypoints_for_signal`. It must be
-      a closure that returns a pair of tensors with keypoint inputs and
-      outputs to use for initialization (must match `num_keypoints` configured
-      in `hparams`). Alternatively the closure can return a dict mapping
-      feature name to pairs for initialization per feature. If `quantiles_dir`
-      and `keypoints_initializers_fn` are both set, the latter takes
-      precedence, and the features for which `keypoints_initializers_fn` is
-      not defined fall back to using the quantiles found in `quantiles_dir`.
-      It uses a closure instead of the tensors themselves because the graph
-      has to be created at the time the model is being built, which happens
-      at a later time.
-    optimizer: string, `Optimizer` object, or callable that defines the
-      optimizer to use for training -- if a callable, it will be called with
-      learning_rate=hparams.learning_rate.
-    config: RunConfig object to configure the runtime settings. Typically set
-      to learn_runner.EstimatorConfig().
-    hparams: an instance of tfl_hparams.CalibratedRtlHParams. If set to
-      None default parameters are used.
-    head: a `TensorFlow Estimator Head` which specifies how the loss function,
-      final predictions, and so on are generated from model outputs. Defaults
-      to using a sigmoid cross entropy head for binary classification and mean
-      squared error head for regression.
-    weight_column: A string or a `tf.feature_column.numeric_column` defining
-      the feature column representing weights. It is used to down-weight or
-      boost examples during training. It will be multiplied by the loss of
-      the example.
-
-  Returns:
-    A `calibrated_rtl_classifier` estimator.
-
-  Raises:
-    ValueError: invalid parameters.
-    KeyError: type of feature not supported.
-  """
-  return _CalibratedRtl(
-      n_classes=2,
-      feature_columns=feature_columns,
-      model_dir=model_dir,
-      quantiles_dir=quantiles_dir,
-      keypoints_initializers_fn=keypoints_initializers_fn,
-      optimizer=optimizer,
-      config=config,
-      hparams=hparams,
-      head=head,
-      weight_column=weight_column)
-
-
-def calibrated_rtl_regressor(feature_columns=None,
-                             model_dir=None,
-                             quantiles_dir=None,
-                             keypoints_initializers_fn=None,
-                             optimizer=None,
-                             config=None,
-                             hparams=None,
-                             head=None,
-                             weight_column=None):
-  """Calibrated rtl regressor model.
-
-  This model uses a piecewise linear calibration function on each of the
-  inputs (parametrized) and then feeds them to an ensemble of random lattices.
-  num_lattices and lattice_rank (number of inputs to each lattice) must be
-  specified in the hyperparameters. Optionally the calibration can be made
-  monotonic.
-
-  It usually requires a preprocessing step on the data to calculate the
-  quantiles of each feature used. This can be done locally, or in one worker
-  only, before training, in a separate invocation of your program (or
-  directly). Typically the quantiles are saved (`save_dir` parameter) to the
-  same directory where the data is.
-
-  Hyper-parameters are given in the form of the object
-  tfl_hparams.CalibratedRtlHParams. lattice_rank and num_lattices must be
-  specified; they have no default values. It also takes in per-feature
-  parameters.
-
-  Internally values will be converted to tf.float32.
-
-  Example:
-
-  ```python
-  def input_fn_train(): ...
-  def input_fn_eval(): ...
-
-  my_feature_columns=[...]
-
-  # Have a separate program flag to generate the quantiles. Needs to be run
-  # only once.
-  if FLAGS.create_quantiles:
-    pwl_calibration_layers.calculate_quantiles_for_keypoints(
-      input_fn=input_fn_train,
-      feature_columns=my_feature_columns,
-      save_dir=FLAGS.data_dir,
-      num_quantiles=1000,
-      override=True)
-    return  # Exit program.
-
-  hparams = tfl_hparams.CalibratedRtlHParams(num_lattices=10, lattice_rank=2)
-  estimator = calibrated_rtl.calibrated_rtl_regressor(
-    feature_columns=my_feature_columns, hparams=hparams)
-  estimator.train(input_fn=input_fn_train)
-  estimator.evaluate(input_fn=input_fn_eval)
-  estimator.predict(input_fn=input_fn_test)
-  ```
-
-  Args:
-    feature_columns: Optional, an iterable containing all the feature
-      columns used by the model. All items in the set should be instances of
-      classes derived from `FeatureColumn`. If not given, the model will
-      use as features the tensors returned by input_fn.
-      Supported types of columns: RealValuedColumn.
-    model_dir: Directory to save model parameters, graphs, etc. This can
-      also be used to load checkpoints from the directory into an estimator to
-      continue training a previously saved model.
-    quantiles_dir: location where quantiles for the data were saved. Typically
-      the same directory as the training data. These quantiles need to be
-      generated only once with
-      `pwl_calibration_layers.calculate_quantiles_for_keypoints` in a separate
-      invocation of your program. If you don't want to use quantiles, you can
-      set `keypoints_initializers_fn` instead.
-    keypoints_initializers_fn: if you know the distribution of your
-      input features you can provide that directly instead of `quantiles_dir`.
-      See `pwl_calibration_layers.uniform_keypoints_for_signal`. It must be
-      a closure that returns a pair of tensors with keypoint inputs and
-      outputs to use for initialization (must match `num_keypoints` configured
-      in `hparams`). Alternatively the closure can return a dict mapping
-      feature name to pairs for initialization per feature. If `quantiles_dir`
-      and `keypoints_initializers_fn` are both set, the latter takes
-      precedence, and the features for which `keypoints_initializers_fn` is
-      not defined fall back to using the quantiles found in `quantiles_dir`.
-      It uses a closure instead of the tensors themselves because the graph
-      has to be created at the time the model is being built, which happens
-      at a later time.
-    optimizer: string, `Optimizer` object, or callable that defines the
-      optimizer to use for training -- if a callable, it will be called with
-      learning_rate=hparams.learning_rate.
-    config: RunConfig object to configure the runtime settings. Typically set
-      to learn_runner.EstimatorConfig().
-    hparams: an instance of tfl_hparams.CalibratedRtlHParams. If set to
-      None default parameters are used.
-    head: a `TensorFlow Estimator Head` which specifies how the loss function,
-      final predictions, and so on are generated from model outputs. Defaults
-      to using a sigmoid cross entropy head for binary classification and mean
-      squared error head for regression.
-    weight_column: A string or a `tf.feature_column.numeric_column` defining
-      the feature column representing weights. It is used to down-weight or
-      boost examples during training. It will be multiplied by the loss of
-      the example.
-
-  Returns:
-    A `calibrated_rtl_regressor` estimator.
-
-  Raises:
-    ValueError: invalid parameters.
-    KeyError: type of feature not supported.
-  """
-  return _CalibratedRtl(
-      n_classes=0,
-      feature_columns=feature_columns,
-      model_dir=model_dir,
-      quantiles_dir=quantiles_dir,
-      keypoints_initializers_fn=keypoints_initializers_fn,
-      optimizer=optimizer,
-      config=config,
-      hparams=hparams,
-      head=head,
-      weight_column=weight_column)
diff --git a/tensorflow_lattice/python/estimators/calibrated_rtl_test.py b/tensorflow_lattice/python/estimators/calibrated_rtl_test.py
deleted file mode 100644
index 5deb2cf..0000000
--- a/tensorflow_lattice/python/estimators/calibrated_rtl_test.py
+++ /dev/null
@@ -1,456 +0,0 @@
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Tests for the CalibratedRtl canned estimators."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-import tensorflow as tf
-
-from tensorflow_lattice.python.estimators import calibrated_rtl
-from tensorflow_lattice.python.estimators import hparams as tfl_hparams
-from tensorflow_lattice.python.lib import keypoints_initialization
-from tensorflow_lattice.python.lib import test_data
-
-_NUM_KEYPOINTS = 50
-
-
-class CalibratedRtlHParamsTest(tf.test.TestCase):
-
-  def setUp(self):
-    super(CalibratedRtlHParamsTest, self).setUp()
-    self.hparams = tfl_hparams.CalibratedRtlHParams(feature_names=['x'])
-    self.hparams.set_param('lattice_size', 2)
-    self.hparams.set_param('calibrator_output_min', 0)
-    self.hparams.set_param('calibrator_output_max', 1)
-    self.hparams.set_param('calibration_bound', True)
-    self.hparams.set_param('lattice_rank', 2)
-    self.hparams.set_param('num_lattices', 10)
-    self.empty_estimator = calibrated_rtl.calibrated_rtl_classifier(
-        hparams=self.hparams)
-
-  def testWrongLatticeSize(self):
-    self.hparams.set_feature_param('x', 'lattice_size', -1)
-    self.assertRaisesRegexp(
-        ValueError,
-        'Hyperparameter configuration cannot be used in the calibrated rtl '
-        'estimator.', self.empty_estimator.check_hparams, self.hparams)
-
-  def testWrongCalibrationOutputMin(self):
-    self.hparams.set_param('calibration_output_min', 0.0)
-    self.hparams.set_feature_param('x', 'calibration_output_min', -1)
-    self.assertRaisesRegexp(
-        ValueError,
-        'calibration_output_min=-1 should not be set, it is adjusted '
-        'automatically to match the lattice_size',
-        self.empty_estimator.check_hparams, self.hparams)
-
-  def testWrongCalibrationOutputMax(self):
-    self.hparams.set_param('calibration_output_max', 0.0)
-    self.hparams.set_feature_param('x', 'calibration_output_max', 10)
-    self.assertRaisesRegexp(
-        ValueError,
-        'calibration_output_max=10 should not be set, it is adjusted '
-        'automatically to match the lattice_size',
-        self.empty_estimator.check_hparams, self.hparams)
-
-  def testWrongCalibrationBound(self):
-    self.hparams.set_feature_param('x', 'calibration_bound', False)
-    self.assertRaisesRegexp(
-        ValueError,
-        'Hyperparameter configuration cannot be used in the calibrated rtl '
-        'estimator.', self.empty_estimator.check_hparams, self.hparams)
-
-  def testNoLatticeRank(self):
-    self.hparams.set_param('lattice_rank', None)
-    self.assertRaisesRegexp(
-        ValueError,
-        'Hyperparameter configuration cannot be used in the calibrated rtl '
-        'estimator.', self.empty_estimator.check_hparams, self.hparams)
-
-  def testNoNumLattices(self):
-    self.hparams.set_param('num_lattices', None)
-    self.assertRaisesRegexp(
-        ValueError,
-        'Hyperparameter configuration cannot be used in the calibrated rtl '
-        'estimator.', self.empty_estimator.check_hparams, self.hparams)
-
-  def testWrongLatticeRegularization(self):
-    self.hparams.set_feature_param('x', 'lattice_l1_reg', 0.1)
-    self.hparams.set_feature_param('x', 'lattice_l2_reg', 0.1)
-    self.hparams.set_feature_param('x', 'lattice_l1_torsion_reg', 0.1)
-    self.hparams.set_feature_param('x', 'lattice_l2_torsion_reg', 0.1)
-    self.assertRaisesRegexp(
-        ValueError,
-        'Hyperparameter configuration cannot be used in the calibrated rtl '
-        'estimator.', self.empty_estimator.check_hparams, self.hparams)
-
-
-class CalibratedRtlTest(tf.test.TestCase):
-
-  def setUp(self):
-    super(CalibratedRtlTest,
self).setUp() - self._test_data = test_data.TestData(num_epochs=10) - - def _CalibratedRtlRegressor(self, - feature_names, - feature_columns, - num_lattices=1, - lattice_rank=1, - num_keypoints=_NUM_KEYPOINTS, - weight_column=None, - **hparams_args): - - def init_fn(): - return keypoints_initialization.uniform_keypoints_for_signal( - num_keypoints, -1., 1., 0., 1.) - - hparams = tfl_hparams.CalibratedRtlHParams( - feature_names, - num_keypoints=num_keypoints, - num_lattices=num_lattices, - lattice_rank=lattice_rank, - **hparams_args) - # Turn off monotonic calibrator. - hparams.set_param('calibration_monotonic', None) - hparams.set_param('learning_rate', 0.1) - - return calibrated_rtl.calibrated_rtl_regressor( - feature_columns=feature_columns, - weight_column=weight_column, - hparams=hparams, - keypoints_initializers_fn=init_fn) - - def _CalibratedRtlClassifier(self, - feature_columns, - num_lattices=1, - lattice_rank=1, - **hparams_args): - - def init_fn(): - return keypoints_initialization.uniform_keypoints_for_signal( - _NUM_KEYPOINTS, -1., 1., 0., 1.) - - hparams = tfl_hparams.CalibratedRtlHParams( - num_keypoints=_NUM_KEYPOINTS, - num_lattices=num_lattices, - lattice_rank=lattice_rank, - **hparams_args) - # Turn off monotonic calibrator. - hparams.set_param('calibration_monotonic', None) - hparams.set_param('learning_rate', 0.1) - - return calibrated_rtl.calibrated_rtl_classifier( - feature_columns=feature_columns, - hparams=hparams, - keypoints_initializers_fn=init_fn) - - def testCalibratedRtlRegressorTraining1D(self): - feature_columns = [ - tf.feature_column.numeric_column('x'), - ] - estimator = self._CalibratedRtlRegressor( - ['x'], feature_columns, num_lattices=3, lattice_rank=1) - estimator.train(input_fn=self._test_data.oned_input_fn()) - results = estimator.evaluate(input_fn=self._test_data.oned_input_fn()) - self.assertLess(results['average_loss'], 1e-3) - - def testCalibratedRtlRegressorWeightedTraining1D(self): - feature_columns = [tf.feature_column.numeric_column('x')] - weight_column = tf.feature_column.numeric_column('zero') - estimator = self._CalibratedRtlRegressor( - ['x'], feature_columns, weight_column=weight_column) - estimator.train(input_fn=self._test_data.oned_zero_weight_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.oned_zero_weight_input_fn()) - # Expects almost zero since the weight values are exactly zero. 
-    self.assertLess(results['average_loss'], 1e-7)
-
-  def testCalibratedRtlRegressorTraining2D(self):
-    feature_columns = [
-        tf.feature_column.numeric_column('x0'),
-        tf.feature_column.numeric_column('x1'),
-    ]
-    estimator = self._CalibratedRtlRegressor(
-        ['x0', 'x1'], feature_columns, num_lattices=3, lattice_rank=2)
-    estimator.train(input_fn=self._test_data.twod_input_fn())
-    results = estimator.evaluate(input_fn=self._test_data.twod_input_fn())
-    self.assertLess(results['average_loss'], 5e-3)
-
-  def testCalibratedRtlRegressorTraining2DWithCalibrationRegularization(self):
-    feature_columns = [
-        tf.feature_column.numeric_column('x0'),
-        tf.feature_column.numeric_column('x1'),
-    ]
-    estimator = self._CalibratedRtlRegressor(
-        ['x0', 'x1'],
-        feature_columns,
-        num_lattices=3,
-        lattice_rank=2,
-        calibration_l1_reg=1e-2,
-        calibration_l2_reg=1e-2,
-        calibration_l1_laplacian_reg=0.05,
-        calibration_l2_laplacian_reg=0.01)
-    estimator.train(input_fn=self._test_data.twod_input_fn())
-    results = estimator.evaluate(input_fn=self._test_data.twod_input_fn())
-    self.assertLess(results['average_loss'], 0.1)
-
-  def testCalibratedLatticeRegressorTraining2DWithLatticeRegularizer(self):
-    feature_columns = [
-        tf.feature_column.numeric_column('x0'),
-        tf.feature_column.numeric_column('x1'),
-    ]
-    estimator = self._CalibratedRtlRegressor(
-        ['x0', 'x1'],
-        feature_columns,
-        num_lattices=5,
-        lattice_rank=2,
-        lattice_l1_reg=1.0,
-        lattice_l2_reg=1.0,
-        lattice_l1_torsion_reg=1.0,
-        lattice_l2_torsion_reg=1.0,
-        lattice_l1_laplacian_reg=1.0,
-        lattice_l2_laplacian_reg=0.1)
-    estimator.train(input_fn=self._test_data.twod_input_fn())
-    results = estimator.evaluate(input_fn=self._test_data.twod_input_fn())
-    # We expect the loss to be larger than the loss without regularization.
-    self.assertGreater(results['average_loss'], 1e-2)
-    self.assertLess(results['average_loss'], 0.5)
-
-  def testCalibratedLatticeRegressorTraining2DWithPerFeatureRegularizer(self):
-    feature_columns = [
-        tf.feature_column.numeric_column('x0'),
-        tf.feature_column.numeric_column('x1'),
-    ]
-    estimator = self._CalibratedRtlRegressor(
-        ['x0', 'x1'],
-        feature_columns,
-        num_lattices=5,
-        lattice_rank=2,
-        feature__x0__lattice_l1_laplacian_reg=100.0,
-        feature__x1__lattice_l2_laplacian_reg=1.0)
-    estimator.train(input_fn=self._test_data.twod_input_fn())
-    results = estimator.evaluate(input_fn=self._test_data.twod_input_fn())
-    # We expect the loss to be larger than the loss without regularization.
-    self.assertGreater(results['average_loss'], 0.1)
-    self.assertLess(results['average_loss'], 0.2)
-
-  def testCalibratedRtlRegressorTrainingMultiDimensionalFeature(self):
-    feature_columns = [
-        tf.feature_column.numeric_column('x', shape=(2,)),
-    ]
-    estimator = self._CalibratedRtlRegressor(
-        ['x'],
-        feature_columns,
-        num_lattices=3,
-        lattice_rank=2)
-    estimator.train(input_fn=self._test_data.multid_feature_input_fn())
-    results = estimator.evaluate(
-        input_fn=self._test_data.multid_feature_input_fn())
-    self.assertLess(results['average_loss'], 2e-2)
-
-    # Turn off calibration for feature 'x'. It should turn off for both
-    # dimensions, and the results should get much worse.
-    estimator = self._CalibratedRtlRegressor(
-        ['x'],
-        feature_columns,
-        num_lattices=3,
-        lattice_rank=2,
-        feature__x__num_keypoints=0)
-    estimator.train(input_fn=self._test_data.multid_feature_input_fn())
-    results = estimator.evaluate(
-        input_fn=self._test_data.multid_feature_input_fn())
-    self.assertGreater(results['average_loss'], 4e-2)
-
-  def testCalibratedRtlClassifierTraining(self):
-    feature_columns = [
-        tf.feature_column.numeric_column('x0'),
-        tf.feature_column.numeric_column('x1'),
-    ]
-    estimator = self._CalibratedRtlClassifier(
-        feature_columns, num_lattices=3, lattice_rank=2)
-    estimator.train(input_fn=self._test_data.twod_classificer_input_fn())
-    results = estimator.evaluate(
-        input_fn=self._test_data.twod_classificer_input_fn())
-    self.assertGreater(results['auc'], 0.990)
-
-  def testCalibratedRtlClassifierTrainingWithCalibrationRegularizer(self):
-    feature_columns = [
-        tf.feature_column.numeric_column('x0'),
-        tf.feature_column.numeric_column('x1'),
-    ]
-    estimator = self._CalibratedRtlClassifier(
-        feature_columns,
-        num_lattices=3,
-        lattice_rank=2,
-        interpolation_type='simplex',
-        calibration_l1_reg=1e-5,
-        calibration_l2_reg=1e-5,
-        calibration_l1_laplacian_reg=1e-5,
-        calibration_l2_laplacian_reg=1e-5)
-    estimator.train(input_fn=self._test_data.twod_classificer_input_fn())
-    results = estimator.evaluate(
-        input_fn=self._test_data.twod_classificer_input_fn())
-    self.assertGreater(results['auc'], 0.980)
-
-  def testCalibratedRtlClassifierTrainingWithLatticeRegularizer(self):
-    feature_columns = [
-        tf.feature_column.numeric_column('x0'),
-        tf.feature_column.numeric_column('x1'),
-    ]
-    estimator = self._CalibratedRtlClassifier(
-        feature_columns,
-        num_lattices=3,
-        lattice_rank=2,
-        interpolation_type='hypercube',
-        lattice_l1_reg=1.0,
-        lattice_l2_reg=1.0,
-        lattice_l1_torsion_reg=1.0,
-        lattice_l2_torsion_reg=1.0,
-        lattice_l1_laplacian_reg=1.0,
-        lattice_l2_laplacian_reg=0.1)
-    estimator.train(input_fn=self._test_data.twod_classificer_input_fn())
-    results = estimator.evaluate(
-        input_fn=self._test_data.twod_classificer_input_fn())
-    # We expect the AUC to be worse than for the model without regularization.
-    self.assertLess(results['auc'], 0.99)
-    self.assertGreater(results['auc'], 0.6)
-
-  def testCalibratedRtlClassifierTrainingWithPerFeatureRegularizer(self):
-    feature_columns = [
-        tf.feature_column.numeric_column('x0'),
-        tf.feature_column.numeric_column('x1'),
-    ]
-    estimator = self._CalibratedRtlClassifier(
-        feature_columns,
-        feature_names=['x0', 'x1'],
-        num_lattices=3,
-        lattice_rank=2,
-        feature__x0__lattice_l1_laplacian_reg=5.0,
-        feature__x1__lattice_l2_laplacian_reg=0.1)
-    estimator.train(input_fn=self._test_data.twod_classificer_input_fn())
-    results = estimator.evaluate(
-        input_fn=self._test_data.twod_classificer_input_fn())
-    # We expect the AUC to be worse than for the model without regularization.
-    self.assertLess(results['auc'], 0.98)
-    self.assertGreater(results['auc'], 0.75)
-
-  def testCalibratedRtlMonotonicClassifierTraining(self):
-    # Construct the following training/testing pair.
-    #
-    # Training: (x, y)
-    # ([0., 0.], 0.0)
-    # ([0., 1.], 1.0)
-    # ([1., 0.], 1.0)
-    # ([1., 1.], 0.0)
-    #
-    # Test: (x, y)
-    # ([0., 0.], 0.0)
-    # ([0., 1.], 1.0)
-    # ([1., 0.], 1.0)
-    # ([1., 1.], 1.0)
-    #
-    # Note that the training set has a noisy example, ([1., 1.], 0.0), and the
-    # test examples are generated by the logical-OR function. Therefore, by
-    # enforcing increasing monotonicity on all features, the model should do
-    # well on the test examples.
-    x0 = np.array([0.0, 0.0, 1.0, 1.0])
-    x1 = np.array([0.0, 1.0, 0.0, 1.0])
-    x_samples = {'x0': x0, 'x1': x1}
-    training_y = np.array([[False], [True], [True], [False]])
-    test_y = np.array([[False], [True], [True], [True]])
-
-    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
-        x=x_samples, y=training_y, batch_size=4, num_epochs=1000,
-        shuffle=False)
-    test_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
-        x=x_samples, y=test_y, shuffle=False)
-
-    # Define a monotonic lattice classifier.
-    feature_columns = [
-        tf.feature_column.numeric_column('x0'),
-        tf.feature_column.numeric_column('x1'),
-    ]
-
-    def init_fn():
-      return keypoints_initialization.uniform_keypoints_for_signal(
-          2, 0., 1., 0., 1.)
-
-    hparams = tfl_hparams.CalibratedRtlHParams(
-        num_keypoints=2, num_lattices=3, lattice_rank=2)
-    # Monotonic calibrated lattice.
-    hparams.set_param('monotonicity', +1)
-    hparams.set_param('learning_rate', 0.1)
-    hparams.set_param('interpolation_type', 'hypercube')
-
-    estimator = calibrated_rtl.calibrated_rtl_classifier(
-        feature_columns=feature_columns,
-        hparams=hparams,
-        keypoints_initializers_fn=init_fn)
-
-    estimator.train(input_fn=train_input_fn)
-    results = estimator.evaluate(input_fn=test_input_fn)
-    # We should expect 1.0 accuracy.
-    self.assertGreater(results['accuracy'], 0.999)
-
-  def testCalibratedRtlWithMissingTraining(self):
-    # x0 is missing with its own vertex, so it can take very different values,
-    # while x1 is missing and calibrated, in this case to the middle of the
-    # lattice.
-    x0 = np.array([0., 0., 1., 1., -1., -1., 0., 1.])
-    x1 = np.array([0., 1., 0., 1., 0., 1., -1., -1.])
-    training_y = np.array([1., 3., 7., 11., 23., 27., 2., 9.])
-    x_samples = {'x0': x0, 'x1': x1}
-
-    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
-        x=x_samples,
-        y=training_y,
-        batch_size=x0.shape[0],
-        num_epochs=2000,
-        shuffle=False)
-    test_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
-        x=x_samples, y=training_y, shuffle=False)
-    feature_columns = [
-        tf.feature_column.numeric_column('x0'),
-        tf.feature_column.numeric_column('x1'),
-    ]
-
-    def init_fn():
-      return keypoints_initialization.uniform_keypoints_for_signal(
-          2, 0., 1., 0., 1.)
-
-    hparams = tfl_hparams.CalibratedRtlHParams(
-        ['x0', 'x1'],
-        num_keypoints=2,
-        num_lattices=3,
-        lattice_rank=2,
-        learning_rate=0.1,
-        missing_input_value=-1.)
-    hparams.set_feature_param('x0', 'missing_vertex', True)
-
-    estimator = calibrated_rtl.calibrated_rtl_regressor(
-        feature_columns=feature_columns,
-        hparams=hparams,
-        keypoints_initializers_fn=init_fn)
-
-    estimator.train(input_fn=train_input_fn)
-    results = estimator.evaluate(input_fn=test_input_fn)
-    self.assertLess(results['average_loss'], 0.1)
-
-
-if __name__ == '__main__':
-  tf.test.main()
diff --git a/tensorflow_lattice/python/estimators/calibrated_test.py b/tensorflow_lattice/python/estimators/calibrated_test.py
deleted file mode 100644
index 50b1d83..0000000
--- a/tensorflow_lattice/python/estimators/calibrated_test.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Calibrated is an abstract base class. This mostly tests dependencies.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import tempfile -import tensorflow as tf - -from tensorflow_lattice.python.estimators import calibrated as calibrated_lib -from tensorflow_lattice.python.estimators import hparams as lf_hparams - - -class CalibratedFake(calibrated_lib.Calibrated): - """Fake Calibrated class, only used to instantiate the model.""" - - def __init__(self, - n_classes, - feature_columns=None, - model_dir=None, - quantiles_dir=None, - keypoints_initializers_fn=None, - optimizer=None, - config=None, - hparams=None): - super(CalibratedFake, self).__init__( - n_classes, feature_columns, model_dir, quantiles_dir, - keypoints_initializers_fn, optimizer, config, hparams, 'Fake') - - def calibration_structure_builder(self, columns_to_tensors, hparams): - return None - - def prediction_builder_from_calibrated( - self, mode, per_dimension_feature_names, hparams, calibrated): - return None - - -class CalibratedTest(tf.test.TestCase): - """Constructor tests only, actual test of the code in CalibratedLinearTest.""" - - def _testConstructor(self, n_classes): - hparams = lf_hparams.CalibratedHParams( - feature_names=['x0', 'x1'], - num_keypoints=20, - feature__x0__num_keypoints=0) - _ = CalibratedFake(n_classes=n_classes, hparams=hparams) - - def testConstructors(self): - self._testConstructor(n_classes=2) - self._testConstructor(n_classes=0) - - def testNumKeypointsInitialization(self): - hparams = lf_hparams.CalibratedHParams( - feature_names=['x0', 'x1'], - num_keypoints=20, - feature__x0__num_keypoints=0) - _ = CalibratedFake(n_classes=2, hparams=hparams) - - # Test that same number of keypoints initialization is fine. - self.assertEqual( - calibrated_lib._update_keypoints( - feature_name='x0', asked_keypoints=20, kp_init_keypoints=20), 20) - - # Test that fewer number of keypoints initialization is fine. - self.assertEqual( - calibrated_lib._update_keypoints( - feature_name='x0', asked_keypoints=20, kp_init_keypoints=10), 10) - - # Test that no calibration is respected. - self.assertEqual( - calibrated_lib._update_keypoints( - feature_name='x1', asked_keypoints=0, kp_init_keypoints=20), 0) - self.assertEqual( - calibrated_lib._update_keypoints( - feature_name='x0', asked_keypoints=None, kp_init_keypoints=20), - None) - - # Test that too many keypoints is not ok! - self.assertRaisesRegexp( - ValueError, - r'Calibration initialization returned more keypoints \(20\) than ' - r'requested \(10\) for feature x0', calibrated_lib._update_keypoints, - 'x0', 10, 20) - - def testCreatedDirectory(self): - # Create and remove temporary directory. 
-    model_dir = tempfile.mkdtemp()
-    os.rmdir(model_dir)
-    hparams = lf_hparams.CalibratedHParams(
-        feature_names=['x0', 'x1'],
-        num_keypoints=20,
-        feature__x0__num_keypoints=10)
-    CalibratedFake(n_classes=2, hparams=hparams, model_dir=model_dir)
-    self.assertTrue(os.path.exists(model_dir))
-
-
-if __name__ == '__main__':
-  tf.test.main()
diff --git a/tensorflow_lattice/python/estimators/hparams.py b/tensorflow_lattice/python/estimators/hparams.py
deleted file mode 100644
index f89ef4f..0000000
--- a/tensorflow_lattice/python/estimators/hparams.py
+++ /dev/null
@@ -1,626 +0,0 @@
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Hyper-parameters support classes for TensorFlow Lattice estimators."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from distutils.util import strtobool
-import six
-
-from tensorflow_lattice.python.lib import regularizers
-
-
-class PerFeatureHParams(object):
-  """Parameters object with per feature parametrization.
-
-  Each parameter can be overwritten for specific features by setting
-  `feature__<feature_name>__<param_name>`, otherwise it falls back to the
-  global parameter value `<param_name>`.
-
-  Parameter types are set from the first value a parameter is set to -- but
-  they can also be reset by `set_param_type`.
-
-  Example: let's say we have a parameter `lattice_size` that should be 2 if not
-  specified (global value), but can be overridden per feature; let's assume
-  there are 3 features: `a`, `b`, and `c` (added after construction). Then:
-
-  ```python
-  hparams = PerFeatureHParams(["a", "b"], lattice_size=2,
-                              feature__b__lattice_size=3)
-  hparams.add_feature(["c"])
-  hparams.get_param("lattice_size") == 2
-  hparams.get_feature_param("a", "lattice_size") == 2
-  hparams.get_feature_param("b", "lattice_size") == 3
-  hparams.get_feature_param("c", "lattice_size") == 2
-  hparams.get_feature_param("d", "lattice_size")  # raises a ValueError
-  ```
-
-  Use the `get_feature_param` method to automatically get the specialized
-  value, or fall back to the global one.
-  """
-
-  # Used to separate the feature prefix, feature name and parameter name.
-  FEATURE_SEPARATOR = '__'
-
-  # Feature prefix for feature-specific parameter values.
-  FEATURE_PREFIX = 'feature'
-
-  def __init__(self, feature_names=None, **kwargs):
-    """Construct with an arbitrary list of parameters.
-
-    Args:
-      feature_names: list of feature names. Only feature names listed here
-        (or added later with add_feature) can have feature-specific parameter
-        values.
-      **kwargs: parameter values, keyed by parameter name.
-
-    Returns:
-      PerFeatureHParams object.
-
-    Raises:
-      ValueError: if a feature-specific parameter value is set for an
-        unknown feature.
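-
-    Editor's aside, not part of the original docstring: the feature-specific
-    key is plain string concatenation,
-    'feature' + FEATURE_SEPARATOR + feature_name + FEATURE_SEPARATOR +
-    param_name, e.g. `feature__b__lattice_size`, so
-    `get_feature_param("b", "lattice_size")` looks that key up first and
-    falls back to the global `lattice_size`.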
- """ - super(PerFeatureHParams, self).__init__() - self._data = {} - self._params_type = {} - self._feature_names = set( - feature_names) if feature_names is not None else set() - for feature_name in self._feature_names: - PerFeatureHParams._check_feature_name(feature_name) - - # First set the global parameters, so they become known and then feature - # specific parameters. - for param_name, value in six.iteritems(kwargs): - if not PerFeatureHParams._is_feature_specific(param_name): - self.set_param(param_name, value) - for param_name, value in six.iteritems(kwargs): - if PerFeatureHParams._is_feature_specific(param_name): - self.set_param(param_name, value) - - @staticmethod - def _check_feature_name(feature_name): - """Raises ValueError if feature_name is not valid.""" - if (PerFeatureHParams.FEATURE_SEPARATOR in feature_name or - '=' in feature_name): - raise ValueError( - 'Invalid feature name "{}": "{}" and "=" are not supported in ' - 'feature names'.format(feature_name, - PerFeatureHParams.FEATURE_SEPARATOR)) - - @staticmethod - def _is_feature_specific(param_name): - return param_name.startswith(PerFeatureHParams.FEATURE_PREFIX + - PerFeatureHParams.FEATURE_SEPARATOR) - - def get_feature_names(self): - """Returns copy of list of known feature names.""" - feature_names_list = list(self._feature_names) - feature_names_list.sort() - return feature_names_list - - def add_feature(self, feature_name): - """Add feature_name (one name or list of names) to list of known names.""" - if isinstance(feature_name, list): - # Add all elements in the list, if a list. - for f in feature_name: - if not isinstance(f, six.string_types): - raise ValueError( - 'feature_name should either be a list of strings, or a string, ' - 'got "%s"' % feature_name) - PerFeatureHParams._check_feature_name(f) - self._feature_names.add(f) - elif isinstance(feature_name, six.string_types): - PerFeatureHParams._check_feature_name(feature_name) - self._feature_names.add(feature_name) - else: - raise ValueError( - 'feature_name should either be a list of strings, or a string, ' - 'got "%s"' % feature_name) - return self - - def param_name_for_feature(self, feature_name, param_name): - """Returns parameter name for specific feature parameter.""" - if feature_name not in self._feature_names: - raise ValueError('Unknown feature name "%s" for parameter "%s"' % - (feature_name, param_name)) - return PerFeatureHParams.FEATURE_SEPARATOR.join( - [PerFeatureHParams.FEATURE_PREFIX, feature_name, param_name]) - - def is_feature_set_param(self, feature_name, param_name): - """Returns whether param_name parameter is set for feature_name.""" - key = self.param_name_for_feature(feature_name, param_name) - return hasattr(self, key) - - def get_feature_param(self, feature_name, param_name, default=None): - """Returns parameter for feature or falls back to global parameter.""" - key = self.param_name_for_feature(feature_name, param_name) - if hasattr(self, key): - return getattr(self, key, None) - return getattr(self, param_name, default) - - def set_feature_param(self, feature_name, param_name, value): - """Sets parameter value specific for feature. 
Returns self."""
-    if feature_name not in self.get_feature_names():
-      raise ValueError(
-          'Unknown feature name "%s" when trying to set parameter "%s", known '
-          'values are %s' % (feature_name, param_name,
-                             self.get_feature_names()))
-    if param_name not in self._params_type:
-      raise ValueError(
-          'Unknown parameter name "%s" when trying to set parameter for '
-          'feature "%s"' % (param_name, feature_name))
-
-    key = self.param_name_for_feature(feature_name, param_name)
-    self._data[key] = value
-    return self
-
-  def get_param(self, param_name, default=None):
-    """Returns the global parameter or falls back to default."""
-    return self._data[param_name] if param_name in self._data else default
-
-  def __getattr__(self, param_name):
-    if param_name.startswith('_') or param_name not in self._data:
-      raise AttributeError('No value set for "{}"'.format(param_name))
-    return self._data[param_name]
-
-  @staticmethod
-  def _parse_value(value_str, value_type):
-    """Parses a string as the given value_type."""
-    if value_type is str:
-      return value_str
-    elif value_type is int:
-      return int(value_str)
-    elif value_type is float:
-      return float(value_str)
-    elif value_type is bool:
-      return strtobool(value_str)
-
-    raise ValueError(
-        'Do not know how to parse type {} -- value was {!r}'.format(
-            value_type, value_str))
-
-  def _set_param(self, param_name, value, parse):
-    """Sets a parameter, optionally parsing it from a string."""
-    # Make sure that feature-specific parameters are properly named.
-    if PerFeatureHParams._is_feature_specific(param_name):
-      parts = param_name.split(PerFeatureHParams.FEATURE_SEPARATOR, 3)
-      if len(parts) != 3:
-        raise ValueError(
-            'Badly formatted feature-specific parameter "{0}", please use '
-            '"{1}{2}<feature_name>{2}<param_name>"'.format(
-                param_name, PerFeatureHParams.FEATURE_PREFIX,
-                PerFeatureHParams.FEATURE_SEPARATOR))
-      if parts[1] not in self._feature_names:
-        raise ValueError(
-            'Unknown feature "{}" for feature specific parameter "{}"'.format(
-                parts[1], param_name))
-      if parts[2] not in self._params_type:
-        raise ValueError(
-            'Unknown parameter name "{}", cannot set it for feature '
-            '"{}"'.format(parts[2], parts[1]))
-      if parse:
-        value = PerFeatureHParams._parse_value(value,
-                                               self._params_type[parts[2]])
-    else:
-      # Non-feature-specific parameter: set _params_type if not yet set.
-      if param_name not in self._params_type:
-        if parse:
-          raise ValueError(
-              'Parsing value for unknown parameter "{}"'.format(param_name))
-        self._params_type[param_name] = type(value)
-      elif parse:
-        value = PerFeatureHParams._parse_value(value,
-                                               self._params_type[param_name])
-    self._data[param_name] = value
-
-  def set_param(self, param_name, value):
-    """Sets a parameter value. Returns self."""
-    self._set_param(param_name, value, parse=False)
-    return self
-
-  def set_param_type(self, param_name, param_type):
-    """Sets the parameter type; the parameter must already be set. Returns self."""
-    if param_name not in self._params_type:
-      raise ValueError(
-          'Can not set parameter type if parameter has not been set for "{}"'.
-          format(param_name))
-    self._params_type[param_name] = param_type
-
-  def parse_param(self, param_name, value_str):
-    """Parses a parameter value from a string. Returns self."""
-    self._set_param(param_name, value_str, parse=True)
-    return self
-
-  def get_global_and_feature_params(self, param_names, feature_names):
-    """Returns values for multiple params, global and for each feature.
-
-    Args:
-      param_names: list of parameters to get values for.
-      feature_names: list of features to get specific values for.
-
-    Returns:
-      * List of global values for the parameters requested in `param_names`.
-      * List of lists of per-feature values for the parameters requested in
-        `param_names`, for the features requested in `feature_names`.
-    """
-    global_values = [self.get_param(param_name) for param_name in param_names]
-    feature_values = []
-    for feature in feature_names:
-      feature_values.append([
-          self.get_feature_param(feature, param_name)
-          for param_name in param_names
-      ])
-    return (global_values, feature_values)
-
-  def values(self):
-    """Returns a shallow copy of the hyperparameter dict."""
-    return {k: v for k, v in six.iteritems(self._data)}
-
-  def __str__(self):
-    return str(sorted(self.values().items()))
-
-  def parse_hparams(self, hparams):
-    """Incorporates hyper-parameters from another HParams object.
-
-    Copies over values of hyper-parameters from the given object. New
-    parameters may be set, but not new features. Also works with
-    `tf.contrib.training.HParams` objects.
-
-    Args:
-      hparams: `PerFeatureHParams` object, but also works with the standard
-        `tf.contrib.training.HParams` object.
-
-    Returns:
-      Changes affect self, but returns self for convenience.
-
-    Raises:
-      ValueError: if trying to set unknown features, or if setting a feature-
-        specific value for an unknown parameter.
-    """
-    # First set the global parameters, so they become known, and then the
-    # feature-specific parameters.
-    if hparams is not None:
-      for param_name, value in six.iteritems(hparams.values()):
-        if not PerFeatureHParams._is_feature_specific(param_name):
-          self.set_param(param_name, value)
-      for param_name, value in six.iteritems(hparams.values()):
-        if PerFeatureHParams._is_feature_specific(param_name):
-          self.set_param(param_name, value)
-    return self
-
-  def parse(self, hparams_str):
-    """Parses strings into hparams.
-
-    Args:
-      hparams_str: must be a comma-separated list of "<param_name>=<value>"
-        pairs, where "<param_name>" is a hyper-parameter name and "<value>"
-        its value.
-
-    Returns:
-      Changes affect self, but returns self for convenience.
-
-    Raises:
-      ValueError: if there is a problem with the input:
-        * if trying to set an unknown parameter.
-        * if trying to set unknown feature(s).
-        * if a value can't be converted to the parameter type.
-    """
-    if hparams_str:
-      for pair in hparams_str.split(','):
-        (key, value) = pair.split('=')
-        self.parse_param(key, value)
-    return self
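-
-# Editor's aside, not part of the original file: a minimal, hypothetical
-# round-trip through PerFeatureHParams.parse(), assuming features 'a' and 'b':
-#   hp = PerFeatureHParams(['a', 'b'], num_keypoints=10)
-#   hp.parse('num_keypoints=20,feature__a__num_keypoints=5')
-#   hp.get_feature_param('a', 'num_keypoints')  # -> 5 (feature-specific)
-#   hp.get_feature_param('b', 'num_keypoints')  # -> 20 (global fallback)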
-
-
-class CalibratedHParams(PerFeatureHParams):
-  """PerFeatureHParams specialization with input calibration parameters.
-
-  The following hyper-parameters can be set as global, or per-feature (see
-  base `PerFeatureHParams` for details):
-
-  * `feature_names`: list of feature names. Only feature names listed here
-    (or added later with add_feature) can have feature-specific parameter
-    values.
-  * `num_keypoints`: Number of keypoints to use for calibration. Set to 0 or
-    `None` for no calibration.
-  * `calibration_output_min`, `calibration_output_max`: minimum and maximum
-    values of the calibration output. -1.0 to 1.0 works well for calibrated
-    linear models. For lattices one will want to set these to
-    (0, `lattice_size` - 1). Only used during initialization of the
-    calibration, if `quantiles_dir` is given to the calibrated model (as
-    opposed to defining one's own value with `keypoints_initializers_fn`).
-    They must be defined for calibration to work; no default is set.
-  * `calibration_bound`: Whether the calibration output is bounded by the
-    limits given in `calibration_output_min`/`calibration_output_max`.
-  * `monotonicity`: Monotonicity for the feature. 0 for no monotonicity,
-    1 and -1 for increasing and decreasing monotonicity respectively.
-  * `missing_input_value`: If set, an input with this value is assumed to be
-    missing, and the output will either be calibrated to some value between
-    `[calibration_output_min, calibration_output_max]` or set to a fixed
-    value given by `missing_output_value`.
-  * `missing_output_value`: Requires `missing_input_value` to also be set. If
-    set, missing inputs are converted to this value. Leave it undefined and
-    the output for missing inputs will be learned.
-  * `calibration_<regularizer_name>` for each regularizer_name in
-    regularizers.CALIBRATOR_REGULARIZERS, e.g. `calibration_l2_reg`.
-  """
-
-  def __init__(self, feature_names=None, **kwargs):
-    # Set default args, and override with given ones.
-    args = {
-        'num_keypoints': 10,
-        'calibration_output_min': None,
-        'calibration_output_max': None,
-        'calibration_bound': False,
-        'monotonicity': 0,
-        'missing_input_value': None,
-        'missing_output_value': None,
-    }
-    regularizer_hparam_names = [
-        'calibration_{}'.format(regularizer_name)
-        for regularizer_name in regularizers.CALIBRATOR_REGULARIZERS
-    ]
-    args.update({
-        regularizer_name: None for regularizer_name in regularizer_hparam_names
-    })
-    args.update(kwargs)
-    super(CalibratedHParams, self).__init__(feature_names, **args)
-    self.set_param_type('monotonicity', int)
-    self.set_param_type('calibration_output_min', float)
-    self.set_param_type('calibration_output_max', float)
-    self.set_param_type('missing_input_value', float)
-    self.set_param_type('missing_output_value', float)
-    for regularizer_name in regularizer_hparam_names:
-      self.set_param_type(regularizer_name, float)
-
-
-class CalibratedLinearHParams(CalibratedHParams):
-  """Hyper-parameters for CalibratedLinear models.
-
-  Same as `CalibratedHParams` (hyper-parameters for input calibration) plus
-  the global learning_rate.
-
-  The parameters `calibration_output_min` and `calibration_output_max`
-  shouldn't be changed (they are fixed at -1. and +1.), since they are
-  eventually re-scaled by the linear layer on top.
-
-  It supports regularization, monotonicity and missing values (input and
-  optionally output).
-  """
-
-  def __init__(self, feature_names=None, **kwargs):
-    # Set default args, and override with given ones.
-    args = {
-        'learning_rate': 0.1,
-        'calibration_output_min': -1.,
-        'calibration_output_max': 1.,
-    }
-    args.update(kwargs)
-    super(CalibratedLinearHParams, self).__init__(feature_names, **args)
-
-
-class CalibratedLatticeHParams(CalibratedHParams):
-  """Hyper-parameters for CalibratedLattice models.
-
-  Supports regularization and monotonicity as described in
-  `CalibratedHParams`. Values for `calibration_output_min`,
-  `calibration_output_max` and `missing_output_value` get set automatically.
-
-  Added parameters:
-
-  * `learning_rate`: (float) a global parameter that sets the step size of
-    the optimizer.
-  * `lattice_size`: (int) a global or per-feature parameter that controls the
-    number of cells for a feature. Should be greater than or equal to 2, and
-    the recommended default value is 2. The calibrator output min and max
-    should be [0, lattice_size - 1], and the output should be bounded, since
-    a lattice expects an input in the range [0, lattice_size - 1].
-  * `interpolation_type`: a global parameter that defines if the lattice will
-    interpolate using the full hypercube or only the simplex ("hyper-triangle",
-    much faster for larger lattices) around the point being evaluated.
-    Valid values: 'hypercube' or 'simplex'.
-  * `missing_input_value`: Value for which a feature is considered missing.
-    Such values are either automatically learned to some calibrated value,
-    or, if missing_vertex is set, they get their own vertex in the lattice.
-  * `missing_vertex`: if missing_input_value is set, this boolean value
-    indicates whether to create an extra vertex for missing values.
-  * `lattice_<regularizer_name>` for each regularizer_name in
-    regularizers.LATTICE_REGULARIZERS, e.g. `lattice_l2_reg`.
-  """
-
-  def __init__(self, feature_names=None, **kwargs):
-    # Set default args, and override with given ones.
-    args = {
-        'learning_rate': 0.1,
-        'lattice_size': 2,
-        'interpolation_type': 'hypercube',
-        'calibration_bound': True,
-        'missing_input_value': None,
-        'missing_vertex': False,
-    }
-    regularizer_hparam_names = [
-        'lattice_{}'.format(regularizer_name)
-        for regularizer_name in regularizers.LATTICE_REGULARIZERS
-    ]
-    args.update({
-        regularizer_name: None for regularizer_name in regularizer_hparam_names
-    })
-    args.update(kwargs)
-    super(CalibratedLatticeHParams, self).__init__(feature_names, **args)
-    self.set_param_type('missing_input_value', float)
-    for regularizer_name in regularizer_hparam_names:
-      self.set_param_type(regularizer_name, float)
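-
-# Editor's aside, not part of the original file: a sketch of how the lattice
-# hparams above translate into calibration output bounds (mirroring
-# _calibration_params earlier in this patch). Without a missing vertex the
-# calibrator is bounded to [0, lattice_size - 1]; with missing_input_value
-# set and missing_vertex=True, the lattice grows by one vertex and missing
-# inputs map to the reserved index lattice_size:
-#   def calibration_params(lattice_size, missing_input_value, missing_vertex):
-#     if missing_input_value is None or not missing_vertex:
-#       return lattice_size, None  # (final_lattice_size, missing_output_value)
-#     return lattice_size + 1, lattice_size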
-
-
-class CalibratedRtlHParams(CalibratedHParams):
-  """Hyper-parameters for CalibratedRtl (RandomTinyLattices) models.
-
-  Supports regularization and monotonicity as described in
-  `CalibratedHParams`. Values for `calibration_output_min`,
-  `calibration_output_max` and `missing_output_value` get set automatically.
-
-  Added parameters:
-
-  * `learning_rate`: (float) a global parameter that sets the step size of
-    the optimizer.
-  * `lattice_size`: (int) a global or per-feature parameter that controls the
-    number of cells for a feature. Should be greater than or equal to 2, and
-    the recommended default value is 2. The calibrator output min and max
-    should be [0, lattice_size - 1], and the output should be bounded, since
-    a lattice expects an input in the range [0, lattice_size - 1]. (Note that
-    if missing_vertex is True an extra vertex is added, so the input range is
-    [0, lattice_size].)
-  * `num_lattices`: (int) the number of lattices to create.
-  * `lattice_rank`: (int) the rank (number of inputs) of each lattice.
-  * `interpolation_type`: a global parameter that defines if the lattice will
-    interpolate using the full hypercube or only the simplex ("hyper-triangle",
-    much faster for larger lattices) around the point being evaluated.
-    Valid values: 'hypercube' or 'simplex'.
-  * `ensemble_bias`: (float) the initial value of the bias term added to the
-    output of the ensemble.
-  * `rtl_seed`: (int) the random seed for the rtl construction.
-  * `missing_input_value`: Value for which a feature is considered missing.
-    Such values are either automatically learned to some calibrated value,
-    or, if missing_vertex is set, they get their own vertex in the lattice.
-  * `missing_vertex`: if missing_input_value is set, this boolean value
-    indicates whether to create an extra vertex for missing values.
-  * `lattice_<regularizer_name>` for each regularizer_name in
-    regularizers.LATTICE_REGULARIZERS, e.g. `lattice_l2_reg`.
-  """
-
-  def __init__(self, feature_names=None, **kwargs):
-    # Set default args, and override with given ones.
-    args = {
-        'learning_rate': 0.1,
-        'lattice_size': 2,
-        'num_lattices': None,
-        'lattice_rank': None,
-        'interpolation_type': 'hypercube',
-        'rtl_seed': 12345,
-        'calibration_bound': True,
-        'missing_input_value': None,
-        'missing_vertex': False,
-        'ensemble_bias': 0.0,
-    }
-    regularizer_hparam_names = [
-        'lattice_{}'.format(regularizer_name)
-        for regularizer_name in regularizers.LATTICE_REGULARIZERS
-    ]
-    args.update({
-        regularizer_name: None for regularizer_name in regularizer_hparam_names
-    })
-    args.update(kwargs)
-    super(CalibratedRtlHParams, self).__init__(feature_names, **args)
-    self.set_param_type('num_lattices', int)
-    self.set_param_type('lattice_rank', int)
-    self.set_param_type('missing_input_value', float)
-    for regularizer_name in regularizer_hparam_names:
-      self.set_param_type(regularizer_name, float)
-
-
-class CalibratedEtlHParams(CalibratedHParams):
-  """Hyper-parameters for CalibratedEtl (Embedded tiny lattices) models.
-
-  Supports regularization and monotonicity as described in
-  `CalibratedHParams`. Values for `calibration_output_min`,
-  `calibration_output_max` and `missing_output_value` get set automatically.
-
-  Note that this architecture does not support per-feature lattice
-  hyper-parameters such as missing_vertex, per-feature missing_input_value,
-  per-feature lattice_size, or per-feature lattice regularization, because
-  after the linear embedding all features are mixed together, so it is not
-  clear how to apply per-feature parameters after the linear embedding layer.
-
-  If there is no non-monotonic feature but `non_monotonic_lattice_rank` or
-  `non_monotonic_num_lattices` is not `None`, an error is raised.
-
-  Added parameters:
-
-  * `learning_rate`: (float) a global parameter that sets the step size of
-    the optimizer.
-  * `lattice_size`: (int) a global parameter that controls the number of
-    cells for a feature. Should be greater than or equal to 2, and the
-    recommended default value is 2. The calibrator output min and max should
-    be [0, `lattice_size` - 1], and the output should be bounded.
-  * `interpolation_type`: a global parameter that defines if the lattice will
-    interpolate using the full hypercube or only the simplex ("hyper-triangle",
-    much faster for larger lattices) around the point being evaluated.
-    Valid values: 'hypercube' or 'simplex'.
-  * `monotonic_lattice_rank`: (int) the rank of each monotonic lattice.
-  * `monotonic_num_lattices`: (int) the number of monotonic lattices to
-    create.
-  * `monotonic_lattice_size`: (int) lattice cell size for each monotonic
-    lattice in the ensemble lattices layer.
-  * `non_monotonic_lattice_rank`: (int) the rank of each non-monotonic
-    lattice. If all features are monotonic, this parameter should be None.
-  * `non_monotonic_num_lattices`: (int) the number of non-monotonic lattices
-    to create. If all features are monotonic, this parameter should be None.
-  * `non_monotonic_lattice_size`: (int) lattice cell size for each
-    non-monotonic lattice in the ensemble lattices layer.
-  * `linear_embedding_calibration_min`: (float) a global parameter that
-    controls the minimum value of the intermediate calibration layers.
-    Default is -100.
-  * `linear_embedding_calibration_max`: (float) a global parameter that
-    controls the maximum value of the intermediate calibration layers.
-    Default is 100.
-  * `linear_embedding_calibration_num_keypoints`: (int) a global parameter
-    that controls `num_keypoints` in the intermediate calibration layers.
-    Default is 100.
-  * `lattice_<regularizer_name>` for each regularizer_name in
-    regularizers.LATTICE_REGULARIZERS, e.g. `lattice_l2_reg`.
-  """
-
-  def __init__(self, feature_names=None, **kwargs):
-    # Set default args, and override with given ones.
-    args = {
-        'learning_rate': 0.1,
-        'monotonic_lattice_rank': None,
-        'monotonic_num_lattices': None,
-        'monotonic_lattice_size': None,
-        'non_monotonic_lattice_rank': None,
-        'non_monotonic_num_lattices': None,
-        'non_monotonic_lattice_size': None,
-        'interpolation_type': 'hypercube',
-        'calibration_bound': True,
-        'linear_embedding_calibration_min': -100.0,
-        'linear_embedding_calibration_max': 100.0,
-        'linear_embedding_calibration_num_keypoints': 100,
-    }
-    regularizer_hparam_names = [
-        'lattice_{}'.format(regularizer_name)
-        for regularizer_name in regularizers.LATTICE_REGULARIZERS
-    ]
-    args.update({
-        regularizer_name: None for regularizer_name in regularizer_hparam_names
-    })
-    args.update(kwargs)
-    super(CalibratedEtlHParams, self).__init__(feature_names, **args)
-    self.set_param_type('monotonic_lattice_rank', int)
-    self.set_param_type('monotonic_num_lattices', int)
-    self.set_param_type('monotonic_lattice_size', int)
-    self.set_param_type('non_monotonic_lattice_rank', int)
-    self.set_param_type('non_monotonic_num_lattices', int)
-    self.set_param_type('non_monotonic_lattice_size', int)
-    self.set_param_type('linear_embedding_calibration_min', float)
-    self.set_param_type('linear_embedding_calibration_max', float)
-    self.set_param_type('linear_embedding_calibration_num_keypoints', int)
-    for regularizer_name in regularizer_hparam_names:
-      self.set_param_type(regularizer_name, float)
diff --git a/tensorflow_lattice/python/estimators/hparams_test.py b/tensorflow_lattice/python/estimators/hparams_test.py
deleted file mode 100644
index 01f1477..0000000
--- a/tensorflow_lattice/python/estimators/hparams_test.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Tests for the hyper-parameters support classes for TensorFlow Lattice."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import tensorflow as tf
-
-from tensorflow_lattice.python.estimators import hparams
-
-
-class TensorFlowLatticeHParamsTest(tf.test.TestCase):
-
-  def testPerFeatureHParams(self):
-    default_num_keypoints = 10
-    feature_x0_num_keypoints = 5
-    hp = hparams.PerFeatureHParams(
-        ['x0', 'x1'],
-        num_keypoints=default_num_keypoints,
-        feature__x0__num_keypoints=feature_x0_num_keypoints)
-    hp.add_feature(['x2'])
-    self.assertEqual(hp.get_feature_names(), ['x0', 'x1', 'x2'])
-
-    # Check missing parameter: a feature-specific parameter cannot be set if
-    # the generic one wasn't set first.
-    with self.assertRaises(ValueError):
-      hp.set_param('feature__x0__foobar', 10)
-
-    # Make sure get_feature_names returns a copy of the internal list.
-    hp.get_feature_names()[0] = 'z'
-    self.assertEqual(hp.get_feature_names(), ['x0', 'x1', 'x2'])
-
-    # Check values: both the global and the specialized value for x0.
-    self.assertEqual(hp.num_keypoints, default_num_keypoints)
-    self.assertEqual(
-        hp.get_feature_param('x0', 'num_keypoints'), feature_x0_num_keypoints)
-    self.assertEqual(
-        hp.get_feature_param('x1', 'num_keypoints'), default_num_keypoints)
-    self.assertEqual(
-        hp.get_feature_param('x2', 'num_keypoints'), default_num_keypoints)
-
-    # Check missing parameter.
-    with self.assertRaises(AttributeError):
-      _ = hp.foobar
-
-    # Check that a missing feature raises an exception.
-    with self.assertRaisesRegexp(ValueError, 'Unknown feature name "x3".*'):
-      hp.get_feature_param('x3', 'num_keypoints')
-
-    # Check that a missing parameter returns None.
-    self.assertEqual(hp.get_feature_param('x2', 'unknown_parameter'), None)
-
-    # Check is_feature_set_param.
-    self.assertEqual(hp.is_feature_set_param('x0', 'num_keypoints'), True)
-    self.assertEqual(hp.is_feature_set_param('x1', 'num_keypoints'), False)
-
-    # Check that the object can't be created with feature-specific parameters
-    # set for an unknown feature.
-    with self.assertRaisesRegexp(
-        ValueError, 'Unknown feature "x2" for feature specific parameter '
-        '"feature__x2__num_keypoints"'):
-      # x2 doesn't exist, this should raise.
-      _ = hparams.PerFeatureHParams(
-          ['x0', 'x1'],
-          num_keypoints=default_num_keypoints,
-          feature__x2__num_keypoints=10)
-
-  def testAddFeature(self):
-    default_num_keypoints = 10
-    feature_x0_num_keypoints = 5
-    hp = hparams.PerFeatureHParams(
-        [u'x0', 'x1'],
-        num_keypoints=default_num_keypoints,
-        feature__x0__num_keypoints=feature_x0_num_keypoints)
-    # Unicode feature name.
-    hp.add_feature([u'x2'])
-    self.assertEqual(hp.get_feature_names(), ['x0', 'x1', 'x2'])
-    self.assertEqual(
-        hp.get_feature_param('x0', 'num_keypoints'), feature_x0_num_keypoints)
-    self.assertEqual(
-        hp.get_feature_param('x1', 'num_keypoints'), default_num_keypoints)
-    self.assertEqual(
-        hp.get_feature_param('x2', 'num_keypoints'), default_num_keypoints)
-    # Feature name not of the expected type string.
-    with self.assertRaises(ValueError) as value_error:
-      hp.add_feature([1.0])
-    self.assertEqual('feature_name should either be a list of strings,'
-                     ' or a string, got "[1.0]"',
-                     str(value_error.exception))
-
-  def testGlobalPerFeatureHParams(self):
-    hp = hparams.PerFeatureHParams(['x0', 'x1'], num_keypoints=2)
-    self.assertEqual(hp.get_param('num_keypoints'), 2)
-    hp.set_param('num_keypoints', 3)
-    self.assertEqual(hp.get_param('num_keypoints'), 3)
-
-  def testParseHParams(self):
-    hp_from = hparams.PerFeatureHParams(['x0', 'x1'], num_keypoints=5)
-    hp_to = hparams.PerFeatureHParams(['x0', 'x1'], num_keypoints=2)
-    hp_to.set_feature_param('x0', 'num_keypoints', 3)
-    hp_to.parse_hparams(hp_from)
-    self.assertEqual(hp_to.get_feature_param('x0', 'num_keypoints'), 3)
-    self.assertEqual(hp_to.get_feature_param('x1', 'num_keypoints'), 5)
-    hp_to.parse_hparams(None)
-
-  def testParseString(self):
-    hp = hparams.PerFeatureHParams(
-        ['x0', 'x1', 'x2'], num_keypoints=2, learning_rate=1.0)
-    hp.set_feature_param('x0', 'num_keypoints', 3)
-
-    # Test the normal use case.
- hp.parse('num_keypoints=5,learning_rate=0.1,feature__x2__num_keypoints=7') - self.assertEqual(hp.get_feature_param('x0', 'num_keypoints'), 3) - self.assertEqual(hp.get_feature_param('x1', 'num_keypoints'), 5) - self.assertEqual(hp.get_feature_param('x2', 'num_keypoints'), 7) - self.assertEqual(hp.learning_rate, 0.1) - - # Test that parsing None and empty has no effect. - hp.parse(None) - hp.parse('') - self.assertEqual(hp.get_feature_param('x0', 'num_keypoints'), 3) - self.assertEqual(hp.get_feature_param('x1', 'num_keypoints'), 5) - self.assertEqual(hp.get_feature_param('x2', 'num_keypoints'), 7) - self.assertEqual(hp.learning_rate, 0.1) - - # Test failures. - with self.assertRaises(ValueError): - hp.parse('feature__x3__num_keypoints=10')  # Unknown feature. - with self.assertRaises(ValueError): - hp.parse('foobar=10')  # Unknown parameter. - with self.assertRaises(ValueError): - hp.parse('feature__x1__foobar=10')  # Unknown parameter for feature. - with self.assertRaises(ValueError): - hp.parse('num_keypoints=1.1')  # Invalid type to parse. - - def testSetParamType(self): - hp = hparams.PerFeatureHParams(['x0', 'x1'], foo='abc') - hp.parse('foo=def') - self.assertEqual(hp.foo, 'def') - hp.set_param('foo', 10) - hp.set_param_type('foo', int) - with self.assertRaises(ValueError): - # Should fail, since now foo is of type int. - hp.parse('foo=def') - - def testConstructorsAllTypes(self): - _ = hparams.CalibratedHParams(['x0', 'x1']) - _ = hparams.CalibratedLinearHParams(['x0', 'x1'], learning_rate=0.1) - _ = hparams.CalibratedLatticeHParams(['x0', 'x1'], learning_rate=0.1) - _ = hparams.CalibratedRtlHParams(['x0', 'x1'], learning_rate=0.1) - etl = hparams.CalibratedEtlHParams(['x0', 'x1'], learning_rate=0.1) - - etl.parse('calibration_bound=yes') - self.assertTrue(etl.calibration_bound) - etl.parse('calibration_bound=off') - self.assertFalse(etl.calibration_bound) - with self.assertRaises(ValueError): - etl.parse('calibration_bound=foobar') - - def testAddNonExistingPerFeatureParam(self): - hp = hparams.CalibratedLinearHParams(['x0', 'x1']) - hp.set_feature_param('x0', 'calibration_l2_laplacian_reg', 0.1) - self.assertAlmostEqual( - hp.get_feature_param('x0', 'calibration_l2_laplacian_reg'), 0.1) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_lattice/python/estimators/separately_calibrated_rtl.py b/tensorflow_lattice/python/estimators/separately_calibrated_rtl.py deleted file mode 100644 index 496fbac..0000000 --- a/tensorflow_lattice/python/estimators/separately_calibrated_rtl.py +++ /dev/null @@ -1,569 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
-# ============================================================================== -"""SeparatelyCalibratedRtl canned estimators.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import copy -import os -import random - -# Dependency imports -import six - -from tensorflow_lattice.python.estimators import calibrated as calibrated_lib -from tensorflow_lattice.python.estimators import hparams as tfl_hparams -from tensorflow_lattice.python.lib import lattice_layers -from tensorflow_lattice.python.lib import regularizers -from tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import - -_EPSILON = 1e-7 - -_RTL_STRUCTURE_FILE = 'rtl_structure.csv' - - -class _SeparatelyCalibratedRtl(calibrated_lib.Calibrated): - """Base class for SeparatelyCalibratedRtl{Classifier|Regressor}.""" - - def __init__(self, - n_classes, - feature_columns=None, - model_dir=None, - quantiles_dir=None, - keypoints_initializers_fn=None, - lattice_initializers_fn=None, - optimizer=None, - config=None, - hparams=None, - head=None, - weight_column=None): - """Construct SeparatelyCalibratedRtlClassifier/Regressor.""" - if not hparams: - hparams = tfl_hparams.CalibratedRtlHParams([]) - self.check_hparams(hparams) - hparams = self._adjust_calibration_params(hparams) - - self.lattice_initializers_fn_ = lattice_initializers_fn - - super(_SeparatelyCalibratedRtl, - self).__init__(n_classes, feature_columns, model_dir, quantiles_dir, - keypoints_initializers_fn, optimizer, config, hparams, - head, weight_column, 'rtl') - self._structure_file = os.path.join(self._model_dir, _RTL_STRUCTURE_FILE) - - def _check_per_feature_param_configuration( - self, adjusted, monotonicity, lattice_size, calibration_output_min, - calibration_output_max, calibration_bound, missing_input_value, - missing_vertex): - """Checks parameter configuration and returns the error messages.""" - error_messages = [] - if monotonicity not in {-1, 0, +1}: - error_messages.append('monotonicity should be an integer {-1, 0, +1} ' - 'but is %s' % monotonicity) - - if lattice_size < 2: - error_messages.append('lattice_size should be greater than or equal to 2 ' - 'but is %d' % (lattice_size)) - - if not calibration_bound: - error_messages.append( - 'A lattice expects a bounded input from a calibration layer, but ' - 'calibration_bound is set to be False') - - if not adjusted: - if calibration_output_min is not None: - error_messages.append( - 'calibration_output_min=%d should not be set, it is adjusted ' - 'automatically to match the lattice_size' % calibration_output_min) - if calibration_output_max is not None: - error_messages.append( - 'calibration_output_max=%d should not be set, it is adjusted ' - 'automatically to match the lattice_size' % calibration_output_max) - - if missing_input_value is None and missing_vertex: - error_messages.append( - 'missing_vertex is True, but missing_input_value is not set') - - return error_messages - - def _check_not_allowed_feature_params(self, hparams): - """Check that hparams does not set feature-level values that are not allowed. - - Certain values cannot be feature-level hyperparameters. This function checks - whether any feature sets hparams that are not allowed to be feature-level - hyperparameters, and returns non-empty error messages if there is an error. - - Args: - hparams: (CalibratedRtlHParams) hyperparameters to be checked. - Returns: - error_messages: (list of strings) error messages.
- """ - not_allowed_feature_params = map( - 'lattice_{}'.format, - regularizers.LATTICE_MULTI_DIMENSIONAL_REGULARIZERS) - error_messages = [] - for param in not_allowed_feature_params: - for feature_name in hparams.get_feature_names(): - if hparams.is_feature_set_param(feature_name, param): - error_messages.append('feature %s sets %s, which is not allowed.' % - (feature_name, param)) - return error_messages - - def check_hparams(self, hparams, adjusted=False): - """Check pre-conditions of hparams. - - Args: - hparams: (tfl_hparams.CalibratedRtlHParams) Hyperparameter to be - examined. - adjusted: if these are the parameters already adjusted. For example, - calibrator_output_min and max should be adjusted so that the output is - in [0, lattice_size - 1] (or [0, lattice_size] if missing_vertex - == True) and calibrator bound should set to be true, etc. - If adjust is True, we will check that all the parameter values is valid, - otherwise, some checks will be skipped. - Raises: - ValueError: If the hyperparameter configuration is invalid, for example - calibration_monotonic is None, but lattice_monotonic is True, then raise - the error with a root cause. - """ - error_messages = self._check_not_allowed_feature_params(hparams) - - # Check lattice_rank and num_lattices. - lattice_rank = hparams.get_param('lattice_rank') - num_lattices = hparams.get_param('num_lattices') - if lattice_rank is None or num_lattices is None: - error_messages.append('lattice_rank and num_lattices should be provided') - - # Check global params. - feature_names = hparams.get_feature_names() - packed_feature_values = hparams.get_global_and_feature_params([ - 'monotonicity', 'lattice_size', 'calibration_output_min', - 'calibration_output_max', 'calibration_bound', 'missing_input_value', - 'missing_vertex' - ], feature_names) - default_feature_values, per_feature_values = packed_feature_values - param_error_messages = self._check_per_feature_param_configuration( - adjusted, *default_feature_values) - if param_error_messages: - error_messages.append('Error message for default feature param:') - error_messages += param_error_messages - - # Check per feature params. hparams.get_feature_names() will only return - # feature names that sets per feature parameters. - for feature_idx in range(len(per_feature_values)): - param_error_messages = self._check_per_feature_param_configuration( - adjusted, *per_feature_values[feature_idx]) - if param_error_messages: - error_messages.append( - 'Error message for %s feature param:' % feature_names[feature_idx]) - error_messages += param_error_messages - - if error_messages: - raise ValueError( - 'Hyperparameter configuration cannot be used in the calibrated ' - 'rtl estimator. 
Error messages report the issue per feature, but' - ' the parameter may be inherited from the global parameter.\nDetailed ' - 'error messages:\n%s' % '\n'.join(error_messages)) - - def _adjust_calibration_params(self, hparams): - """Adjust the calibration parameters to match the input size of the lattices.""" - hparams = copy.deepcopy(hparams) - feature_names = hparams.get_feature_names() - packed_feature_values = hparams.get_global_and_feature_params( - ['lattice_size', 'missing_input_value', 'missing_vertex'], - feature_names) - default_feature_values, per_feature_values = packed_feature_values - final_lattice_size, missing_output_value = self._calibration_params( - *default_feature_values) - lattice_size = default_feature_values[0] - hparams.set_param('calibration_output_min', 0) - hparams.set_param('calibration_output_max', lattice_size - 1) - hparams.set_param('final_lattice_size', final_lattice_size) - hparams.set_param('missing_output_value', missing_output_value) - - if len(per_feature_values) != len(feature_names): - raise ValueError( - 'length of per_feature_value (%d) != length of feature_names (%d)' % - (len(per_feature_values), len(feature_names))) - for (per_feature_value, feature_name) in zip(per_feature_values, - feature_names): - final_lattice_size, missing_output_value = self._calibration_params( - *per_feature_value) - lattice_size = per_feature_value[0] - hparams.set_feature_param(feature_name, 'calibration_output_min', 0) - hparams.set_feature_param(feature_name, 'calibration_output_max', - lattice_size - 1) - hparams.set_feature_param(feature_name, 'final_lattice_size', - final_lattice_size) - hparams.set_feature_param(feature_name, 'missing_output_value', - missing_output_value) - return hparams - - def _calibration_params(self, lattice_size, missing_input_value, - missing_vertex): - """Returns final_lattice_size and missing_output_value.""" - if missing_input_value is None or not missing_vertex: - return lattice_size, None - - # Last vertex of the lattice is reserved for missing values. - return lattice_size + 1, lattice_size - - def _load_structure(self): - """Load rtl structure from model_dir.""" - if not file_io.file_exists(self._structure_file): - raise ValueError( - 'Structure file does not exist in %s!' % self._structure_file) - structure_csv_string = file_io.read_file_to_string(self._structure_file) - structure_csvs = structure_csv_string.split('\n') - structure = [] - for structure_csv in structure_csvs: - structure.append([int(idx) for idx in structure_csv.split(',')]) - return structure - - def _save_structure(self, structure): - """Save rtl structure to model_dir.""" - structure_csvs = [] - for lattice in structure: - structure_csvs.append(','.join([str(idx) for idx in lattice])) - structure_csv_string = '\n'.join(structure_csvs) - file_io.write_string_to_file(self._structure_file, structure_csv_string) - - - def _create_structure(self, input_dim, num_lattices, lattice_rank, rtl_seed): - """Create rtl structure.""" - rtl_random = random.Random(rtl_seed) - structure = [] - for _ in range(num_lattices): - structure.append( - rtl_random.sample(six.moves.xrange(input_dim), lattice_rank)) - return structure - - def calibration_structure_builder(self, columns_to_tensors, hparams): - """Returns the calibration structure of the model. See base class.""" - - - # Check to make sure input features are single-dimensional.
- for (column, tensor) in six.iteritems(columns_to_tensors): - if len(tensor.shape) > 1 and tensor.shape.dims[1].value > 1: - raise ValueError( - 'Separately calibrated RTLs do not support multi-dimensional ' - 'features: %s with shape %s' % (column, tensor.shape)) - sorted_columns = sorted(columns_to_tensors.keys()) - n_inputs = len(columns_to_tensors) - - num_lattices = hparams.get_param('num_lattices') - lattice_rank = hparams.get_param('lattice_rank') - rtl_seed = hparams.get_param('rtl_seed') - # Create and save the structure if it does not exist. - - if not file_io.file_exists(self._structure_file): - structure = self._create_structure(n_inputs, num_lattices, lattice_rank, - rtl_seed) - self._save_structure(structure) - structure = self._load_structure() - # Check that the structure is what we expect. - if len(structure) != num_lattices: - raise ValueError( - 'Expected %d lattices, but found %d lattices in ' - 'structure: %s' % (num_lattices, len(structure), str(structure))) - calibration_structure = [] - for indices in structure: - if len(indices) != lattice_rank: - raise ValueError('Expected lattice rank %d, but found %d in structure: %s' - % (lattice_rank, len(indices), str(structure))) - sub_columns_to_tensors = { - sorted_columns[i]: columns_to_tensors[sorted_columns[i]] - for i in indices - } - calibration_structure.append(sub_columns_to_tensors) - - return calibration_structure - - def prediction_builder_from_calibrated( - self, mode, per_dimension_feature_names, hparams, calibrated): - """Construct the prediction.""" - self.check_hparams(hparams, adjusted=True) - lattice_sizes = [ - hparams.get_feature_param(f, 'final_lattice_size') - for f in per_dimension_feature_names - ] - lattice_monotonic = [(hparams.get_feature_param(f, 'monotonicity') != 0) - for f in per_dimension_feature_names] - interpolation_type = hparams.get_param('interpolation_type') - # Set up the regularization. - regularizer_amounts = {} - for reg_name in regularizers.LATTICE_MULTI_DIMENSIONAL_REGULARIZERS: - regularizer_amounts[reg_name] = hparams.get_param( - 'lattice_{}'.format(reg_name)) - for reg_name in regularizers.LATTICE_ONE_DIMENSIONAL_REGULARIZERS: - regularizer_amounts[reg_name] = [ - hparams.get_feature_param(feature_name, 'lattice_{}'.format(reg_name)) - for feature_name in per_dimension_feature_names - ] - - (prediction, _, projection_ops, - regularization) = lattice_layers.lattice_layer( - calibrated, - lattice_sizes, - is_monotone=lattice_monotonic, - interpolation_type=interpolation_type, - lattice_initializer=self.lattice_initializers_fn_, - **regularizer_amounts) - # Returns prediction Tensor, projection ops, and regularization. - return prediction, projection_ops, regularization - - -def separately_calibrated_rtl_classifier(feature_columns=None, - model_dir=None, - quantiles_dir=None, - keypoints_initializers_fn=None, - optimizer=None, - config=None, - hparams=None, - head=None, - weight_column=None): - """Calibrated rtl binary classifier model. - - - - This model uses a piecewise linear calibration function on each of the - inputs (parametrized) and then feeds them to an ensemble of random lattices. - num_lattices and lattice_rank (number of inputs to each lattice) must be - specified in the hyperparameters. Optionally calibration can be made monotonic. - - It usually requires a preprocessing step on the data, to calculate the - quantiles of each used feature. This can be done locally or in one worker - only before training, in a separate invocation of your program (or directly).
- Typically this can be saved (`save_dir` parameter) to the same - directory where the data is. - - Hyper-parameters are given in the form of the object - tfl_hparams.CalibratedRtlHParams. lattice_rank and num_lattices must - be specified; they have no default values. It also takes in - per-feature parameters. - - Internally values will be converted to tf.float32. - - Example: - - ```python - def input_fn_train(): ... - def input_fn_eval(): ... - - my_feature_columns=[...] - - # Have a separate program flag to generate the quantiles. Needs to be run - # only once. - if FLAGS.create_quantiles: - pwl_calibrators_layers.calculate_quantiles_for_keypoints( - input_fn=input_fn_train, - feature_columns=my_feature_columns, - save_dir=FLAGS.data_dir, - num_quantiles=1000, - override=True) - return # Exit program. - - hparams = hparams.CalibratedRtlHParams(num_lattices=10, lattice_rank=2) - estimator = separately_calibrated_rtl.separately_calibrated_rtl_classifier( - feature_columns=feature_columns, hparams=hparams) - estimator.train(input_fn=input_fn_train) - estimator.evaluate(input_fn=input_fn_eval) - estimator.predict(input_fn=input_fn_predict) - ``` - - Args: - feature_columns: Optional, an iterable containing all the feature - columns used by the model. All items in the set should be instances of - classes derived from `FeatureColumn`. If not given, the model will - use as features the tensors returned by input_fn. - Supported types of columns: RealValuedColumn. - model_dir: Directory to save model parameters, graphs, etc. This can - also be used to load checkpoints from the directory into an estimator to - continue training a previously saved model. - quantiles_dir: location where quantiles for the data were saved. Typically - the same directory as the training data. These quantiles can be - generated only once with - `pwl_calibration_layers.calculate_quantiles_for_keypoints` in a separate - invocation of your program. If you don't want to use quantiles, you can - set `keypoints_initializer` instead. - keypoints_initializers_fn: if you know the distribution of your - input features you can provide that directly instead of `quantiles_dir`. - See `pwl_calibrators_layers.uniform_keypoints_for_signal`. It must be - a closure that returns a pair of tensors with keypoints inputs and - outputs to use for initialization (must match `num_keypoints` configured - in `hparams`). Alternatively the closure can return a dict mapping - feature name to pairs for initialization per feature. If `quantiles_dir` - and `keypoints_initializers_fn` are set, the latter takes precedence, - and the features for which `keypoints_initializers` are not defined - fall back to using the quantiles found in `quantiles_dir`. It uses a - closure instead of the tensors themselves because the graph has to be - created at the time the model is being built, which happens at a later - time. - optimizer: string, `Optimizer` object, or callable that defines the - optimizer to use for training -- if a callable, it will be called with - learning_rate=hparams.learning_rate. - config: RunConfig object to configure the runtime settings. Typically set - to learn_runner.EstimatorConfig(). - hparams: an instance of tfl_hparams.CalibratedRtlHParams. If set to - None, default parameters are used. - head: a `TensorFlow Estimator Head` which specifies how the loss function, - final predictions, and so on are generated from model outputs.
Defaults - to using a sigmoid cross entropy head for binary classification and mean - squared error head for regression. - weight_column: A string or a `tf.feature_column.numeric_column` defining - feature column representing weights. It is used to down-weight or boost - examples during training. It will be multiplied by the loss of the - example. - - Returns: - A `separately_calibrated_rtl_classifier` estimator. - - Raises: - ValueError: invalid parameters. - KeyError: type of feature not supported. - """ - return _SeparatelyCalibratedRtl( - n_classes=2, - feature_columns=feature_columns, - model_dir=model_dir, - quantiles_dir=quantiles_dir, - keypoints_initializers_fn=keypoints_initializers_fn, - optimizer=optimizer, - config=config, - hparams=hparams, - head=head, - weight_column=weight_column) - - -def separately_calibrated_rtl_regressor(feature_columns=None, - model_dir=None, - quantiles_dir=None, - keypoints_initializers_fn=None, - optimizer=None, - config=None, - hparams=None, - head=None, - weight_column=None): - """Calibrated rtl regressor model. - - This model uses a piecewise linear calibration function on each of the - inputs (parametrized) and then feeds them to an ensemble of random lattices. - num_lattices and lattice_rank (number of inputs to each lattice) must be - specified in the hyperparameters. Optionally calibration can be made monotonic. - - It usually requires a preprocessing step on the data, to calculate the - quantiles of each used feature. This can be done locally or in one worker - only before training, in a separate invocation of your program (or directly). - Typically this can be saved (`save_dir` parameter) to the same - directory where the data is. - - Hyper-parameters are given in the form of the object - tfl_hparams.CalibratedRtlHParams. lattice_rank and num_lattices must - be specified; they have no default values. It also takes in - per-feature parameters. - - Internally values will be converted to tf.float32. - - Example: - - ```python - def input_fn_train(): ... - def input_fn_eval(): ... - - my_feature_columns=[...] - - # Have a separate program flag to generate the quantiles. Needs to be run - # only once. - if FLAGS.create_quantiles: - pwl_calibrators_layers.calculate_quantiles_for_keypoints( - input_fn=input_fn_train, - feature_columns=my_feature_columns, - save_dir=FLAGS.data_dir, - num_quantiles=1000, - override=True) - return # Exit program. - - hparams = hparams.CalibratedRtlHParams(num_lattices=10, lattice_rank=2) - estimator = separately_calibrated_rtl.separately_calibrated_rtl_regressor( - feature_columns=feature_columns, hparams=hparams) - estimator.train(input_fn=input_fn_train) - estimator.evaluate(input_fn=input_fn_eval) - estimator.predict(input_fn=input_fn_test) - ``` - - Args: - feature_columns: Optional, an iterable containing all the feature - columns used by the model. All items in the set should be instances of - classes derived from `FeatureColumn`. If not given, the model will - use as features the tensors returned by input_fn. - Supported types of columns: RealValuedColumn. - model_dir: Directory to save model parameters, graphs, etc. This can - also be used to load checkpoints from the directory into an estimator to - continue training a previously saved model. - quantiles_dir: location where quantiles for the data were saved. Typically - the same directory as the training data.
These quantiles can be - generated only once with - `pwl_calibration_layers.calculate_quantiles_for_keypoints` in a separate - invocation of your program. If you don't want to use quantiles, you can - set `keypoints_initializer` instead. - keypoints_initializers_fn: if you know the distribution of your - input features you can provide that directly instead of `quantiles_dir`. - See `pwl_calibrators_layers.uniform_keypoints_for_signal`. It must be - a closure that returns a pair of tensors with keypoints inputs and - outputs to use for initialization (must match `num_keypoints` configured - in `hparams`). Alternatively the closure can return a dict mapping - feature name to pairs for initialization per feature. If `quantiles_dir` - and `keypoints_initializers_fn` are set, the latter takes precedence, - and the features for which `keypoints_initializers` are not defined - fall back to using the quantiles found in `quantiles_dir`. It uses a - closure instead of the tensors themselves because the graph has to be - created at the time the model is being built, which happens at a later - time. - optimizer: string, `Optimizer` object, or callable that defines the - optimizer to use for training -- if a callable, it will be called with - learning_rate=hparams.learning_rate. - config: RunConfig object to configure the runtime settings. Typically set - to learn_runner.EstimatorConfig(). - hparams: an instance of tfl_hparams.CalibratedRtlHParams. If set to - None, default parameters are used. - head: a `TensorFlow Estimator Head` which specifies how the loss function, - final predictions, and so on are generated from model outputs. Defaults - to using a sigmoid cross entropy head for binary classification and mean - squared error head for regression. - weight_column: A string or a `tf.feature_column.numeric_column` defining - feature column representing weights. It is used to down-weight or boost - examples during training. It will be multiplied by the loss of the - example. - - Returns: - A `separately_calibrated_rtl_regressor` estimator. - - Raises: - ValueError: invalid parameters. - KeyError: type of feature not supported. - """ - return _SeparatelyCalibratedRtl( - n_classes=0, - feature_columns=feature_columns, - model_dir=model_dir, - quantiles_dir=quantiles_dir, - keypoints_initializers_fn=keypoints_initializers_fn, - optimizer=optimizer, - config=config, - hparams=hparams, - head=head, - weight_column=weight_column) diff --git a/tensorflow_lattice/python/estimators/separately_calibrated_rtl_test.py b/tensorflow_lattice/python/estimators/separately_calibrated_rtl_test.py deleted file mode 100644 index a72960f..0000000 --- a/tensorflow_lattice/python/estimators/separately_calibrated_rtl_test.py +++ /dev/null @@ -1,432 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
-# ============================================================================== -"""CalibratedRtl provide canned estimators.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf - -from tensorflow_lattice.python.estimators import hparams as tfl_hparams -from tensorflow_lattice.python.estimators import separately_calibrated_rtl as scrtl -from tensorflow_lattice.python.lib import keypoints_initialization -from tensorflow_lattice.python.lib import test_data - -_NUM_KEYPOINTS = 50 - - -class CalibratedRtlHParamsTest(tf.test.TestCase): - - def setUp(self): - super(CalibratedRtlHParamsTest, self).setUp() - self.hparams = tfl_hparams.CalibratedRtlHParams(feature_names=['x']) - self.hparams.set_param('lattice_size', 2) - self.hparams.set_param('calibrator_output_min', 0) - self.hparams.set_param('calibrator_output_max', 1) - self.hparams.set_param('calibration_bound', True) - self.hparams.set_param('lattice_rank', 2) - self.hparams.set_param('num_lattices', 10) - self.empty_estimator = scrtl.separately_calibrated_rtl_classifier( - hparams=self.hparams) - - def testWrongLatticeSize(self): - self.hparams.set_feature_param('x', 'lattice_size', -1) - self.assertRaisesRegexp( - ValueError, - 'Hyperparameter configuration cannot be used in the calibrated rtl ' - 'estimator.', self.empty_estimator.check_hparams, self.hparams) - - def testWrongCalibrationOutputMin(self): - self.hparams.set_param('calibration_output_min', 0.0) - self.hparams.set_feature_param('x', 'calibration_output_min', -1) - self.assertRaisesRegexp( - ValueError, - 'calibration_output_min=-1 should not be set, it is adjusted ' - 'automatically to match the lattice_size', - self.empty_estimator.check_hparams, self.hparams) - - def testWrongCalibrationOutputMax(self): - self.hparams.set_param('calibration_output_max', 0.0) - self.hparams.set_feature_param('x', 'calibration_output_max', 10) - self.assertRaisesRegexp( - ValueError, - 'calibration_output_max=10 should not be set, it is adjusted ' - 'automatically to match the lattice_size', - self.empty_estimator.check_hparams, self.hparams) - - def testWrongCalibrationBound(self): - self.hparams.set_feature_param('x', 'calibration_bound', False) - self.assertRaisesRegexp( - ValueError, - 'Hyperparameter configuration cannot be used in the calibrated rtl ' - 'estimator.', self.empty_estimator.check_hparams, self.hparams) - - def testNoLatticeRank(self): - self.hparams.set_param('lattice_rank', None) - self.assertRaisesRegexp( - ValueError, - 'Hyperparameter configuration cannot be used in the calibrated rtl ' - 'estimator.', self.empty_estimator.check_hparams, self.hparams) - - def testNoNumLattices(self): - self.hparams.set_param('num_lattices', None) - self.assertRaisesRegexp( - ValueError, - 'Hyperparameter configuration cannot be used in the calibrated rtl ' - 'estimator.', self.empty_estimator.check_hparams, self.hparams) - - def testWrongLatticeRegularization(self): - self.hparams.set_feature_param('x', 'lattice_l1_reg', 0.1) - self.hparams.set_feature_param('x', 'lattice_l2_reg', 0.1) - self.hparams.set_feature_param('x', 'lattice_l1_torsion_reg', 0.1) - self.hparams.set_feature_param('x', 'lattice_l1_torsion_reg', 0.1) - self.assertRaisesRegexp( - ValueError, - 'Hyperparameter configuration cannot be used in the calibrated rtl ' - 'estimator.', self.empty_estimator.check_hparams, self.hparams) - - -class CalibratedRtlTest(tf.test.TestCase): - - def setUp(self): - 
super(CalibratedRtlTest, self).setUp() - self._test_data = test_data.TestData(num_epochs=10) - - def _CalibratedRtlRegressor(self, - feature_names, - feature_columns, - num_lattices=1, - lattice_rank=1, - num_keypoints=_NUM_KEYPOINTS, - weight_column=None, - **hparams_args): - - def init_fn(): - return keypoints_initialization.uniform_keypoints_for_signal( - num_keypoints, -1., 1., 0., 1.) - - hparams = tfl_hparams.CalibratedRtlHParams( - feature_names, - num_keypoints=num_keypoints, - num_lattices=num_lattices, - lattice_rank=lattice_rank, - **hparams_args) - # Turn off monotonic calibrator. - hparams.set_param('calibration_monotonic', None) - hparams.set_param('learning_rate', 0.1) - - return scrtl.separately_calibrated_rtl_regressor( - feature_columns=feature_columns, - weight_column=weight_column, - hparams=hparams, - keypoints_initializers_fn=init_fn) - - def _CalibratedRtlClassifier(self, - feature_columns, - num_lattices=1, - lattice_rank=1, - **hparams_args): - - def init_fn(): - return keypoints_initialization.uniform_keypoints_for_signal( - _NUM_KEYPOINTS, -1., 1., 0., 1.) - - hparams = tfl_hparams.CalibratedRtlHParams( - num_keypoints=_NUM_KEYPOINTS, - num_lattices=num_lattices, - lattice_rank=lattice_rank, - **hparams_args) - # Turn off monotonic calibrator. - hparams.set_param('calibration_monotonic', None) - hparams.set_param('learning_rate', 0.1) - - return scrtl.separately_calibrated_rtl_classifier( - feature_columns=feature_columns, - hparams=hparams, - keypoints_initializers_fn=init_fn) - - def testCalibratedRtlRegressorTraining1D(self): - feature_columns = [ - tf.feature_column.numeric_column('x'), - ] - estimator = self._CalibratedRtlRegressor(['x'], - feature_columns, - num_lattices=3, - lattice_rank=1) - estimator.train(input_fn=self._test_data.oned_input_fn()) - results = estimator.evaluate(input_fn=self._test_data.oned_input_fn()) - self.assertLess(results['average_loss'], 1e-2) - - def testSeparatelyCalibratedRtlRegressorWeightedTraining1D(self): - feature_columns = [tf.feature_column.numeric_column('x')] - weight_column = tf.feature_column.numeric_column('zero') - estimator = self._CalibratedRtlRegressor(['x'], - feature_columns, - num_lattices=2, - weight_column=weight_column) - estimator.train(input_fn=self._test_data.oned_zero_weight_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.oned_zero_weight_input_fn()) - # Expects almost zero since the weight values are exactly zero. 
- self.assertLess(results['average_loss'], 1e-7) - - def testCalibratedRtlRegressorTraining2D(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedRtlRegressor(['x0', 'x1'], - feature_columns, - num_lattices=3, - lattice_rank=2) - estimator.train(input_fn=self._test_data.twod_input_fn()) - results = estimator.evaluate(input_fn=self._test_data.twod_input_fn()) - self.assertLess(results['average_loss'], 5e-3) - - def testCalibratedRtlRegressorTraining2DWithCalibrationRegularization(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedRtlRegressor(['x0', 'x1'], - feature_columns, - num_lattices=3, - lattice_rank=2, - calibration_l1_reg=1e-2, - calibration_l2_reg=1e-2, - calibration_l1_laplacian_reg=0.05, - calibration_l2_laplacian_reg=0.01) - estimator.train(input_fn=self._test_data.twod_input_fn()) - results = estimator.evaluate(input_fn=self._test_data.twod_input_fn()) - self.assertLess(results['average_loss'], 0.1) - - def testCalibratedLatticeRegressorTraining2DWithLatticeRegularizer(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedRtlRegressor(['x0', 'x1'], - feature_columns, - num_lattices=2, - lattice_rank=2, - lattice_l1_reg=1.0, - lattice_l2_reg=1.0, - lattice_l1_torsion_reg=1.0, - lattice_l2_torsion_reg=1.0, - lattice_l1_laplacian_reg=1.0, - lattice_l2_laplacian_reg=0.1) - estimator.train(input_fn=self._test_data.twod_input_fn()) - results = estimator.evaluate(input_fn=self._test_data.twod_input_fn()) - # We expect the loss is larger than the loss without regularization. - self.assertGreater(results['average_loss'], 1e-2) - self.assertLess(results['average_loss'], 0.5) - - def testCalibratedLatticeRegressorTraining2DWithPerFeatureRegularizer(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedRtlRegressor( - ['x0', 'x1'], - feature_columns, - num_lattices=2, - lattice_rank=2, - feature__x0__lattice_l1_laplacian_reg=100.0, - feature__x1__lattice_l2_laplacian_reg=1.0) - estimator.train(input_fn=self._test_data.twod_input_fn()) - results = estimator.evaluate(input_fn=self._test_data.twod_input_fn()) - # We expect the loss is larger than the loss without regularization. 
- self.assertGreater(results['average_loss'], 0.1) - self.assertLess(results['average_loss'], 0.2) - - def testCalibratedRtlClassifierTraining(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedRtlClassifier( - feature_columns, num_lattices=3, lattice_rank=2) - estimator.train(input_fn=self._test_data.twod_classificer_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.twod_classificer_input_fn()) - self.assertGreater(results['auc'], 0.990) - - def testCalibratedRtlClassifierTrainingWithCalibrationRegularizer(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedRtlClassifier( - feature_columns, - num_lattices=3, - lattice_rank=2, - interpolation_type='simplex', - calibration_l1_reg=1e-5, - calibration_l2_reg=1e-5, - calibration_l1_laplacian_reg=1e-5, - calibration_l2_laplacian_reg=1e-5) - estimator.train(input_fn=self._test_data.twod_classificer_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.twod_classificer_input_fn()) - self.assertGreater(results['auc'], 0.980) - - def testCalibratedRtlClassifierTrainingWithLatticeRegularizer(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedRtlClassifier( - feature_columns, - num_lattices=3, - lattice_rank=2, - interpolation_type='hypercube', - lattice_l1_reg=1.0, - lattice_l2_reg=1.0, - lattice_l1_torsion_reg=1.0, - lattice_l2_torsion_reg=1.0, - lattice_l1_laplacian_reg=1.0, - lattice_l2_laplacian_reg=1.0) - estimator.train(input_fn=self._test_data.twod_classificer_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.twod_classificer_input_fn()) - # We expect AUC is worse than the model without regularization. - self.assertLess(results['auc'], 0.99) - self.assertGreater(results['auc'], 0.4) - - def testCalibratedRtlClassifierTrainingWithPerFeatureRegularizer(self): - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - estimator = self._CalibratedRtlClassifier( - feature_columns, - feature_names=['x0', 'x1'], - num_lattices=3, - lattice_rank=2, - feature__x0__lattice_l1_laplacian_reg=5.0, - feature__x1__lattice_l2_laplacian_reg=0.5) - estimator.train(input_fn=self._test_data.twod_classificer_input_fn()) - results = estimator.evaluate( - input_fn=self._test_data.twod_classificer_input_fn()) - # We expect AUC is worse than the model without regularization. - self.assertLess(results['auc'], 0.98) - self.assertGreater(results['auc'], 0.7) - - def testCalibratedRtlMonotonicClassifierTraining(self): - # Construct the following training/testing pair. - # - # Training: (x, y) - # ([0., 0.], 0.0) - # ([0., 1.], 1.0) - # ([1., 0.], 1.0) - # ([1., 1.], 0.0) - # - # Test: (x, y) - # ([0., 0.], 0.0) - # ([0., 1.], 1.0) - # ([1., 0.], 1.0) - # ([1., 1.], 1.0) - # - # Note that training example has a noisy sample, ([1., 1.], 0.0), and test - # examples are generated by the logical-OR function. Therefore by enforcing - # increasing monotonicity to all features, we should be able to work well - # in the test examples. 
- x0 = np.array([0.0, 0.0, 1.0, 1.0]) - x1 = np.array([0.0, 1.0, 0.0, 1.0]) - x_samples = {'x0': x0, 'x1': x1} - training_y = np.array([[False], [True], [True], [False]]) - test_y = np.array([[False], [True], [True], [True]]) - - train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x=x_samples, y=training_y, batch_size=4, num_epochs=1000, shuffle=False) - test_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x=x_samples, y=test_y, shuffle=False) - - # Define monotonic lattice classifier. - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - - def init_fn(): - return keypoints_initialization.uniform_keypoints_for_signal( - 2, 0., 1., 0., 1.) - - hparams = tfl_hparams.CalibratedRtlHParams( - num_keypoints=2, num_lattices=3, lattice_rank=2) - # Monotonic calibrated lattice. - - hparams.set_param('monotonicity', +1) - hparams.set_param('learning_rate', 0.1) - hparams.set_param('interpolation_type', 'hypercube') - - estimator = scrtl.separately_calibrated_rtl_classifier( - feature_columns=feature_columns, - hparams=hparams, - keypoints_initializers_fn=init_fn) - - estimator.train(input_fn=train_input_fn) - results = estimator.evaluate(input_fn=test_input_fn) - # We should expect 1.0 accuracy. - self.assertGreater(results['accuracy'], 0.999) - - def testCalibratedRtlWithMissingTraining(self): - # x0 is missing with its own vertex: so it can take very different values, - # while x1 is missing and calibrated, in this case to the middle of the - # lattice. - x0 = np.array([0., 0., 1., 1., -1., -1., 0., 1.]) - x1 = np.array([0., 1., 0., 1., 0., 1., -1., -1.]) - training_y = np.array([1., 3., 7., 11., 23., 27., 2., 9.]) - x_samples = {'x0': x0, 'x1': x1} - - train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x=x_samples, - y=training_y, - batch_size=x0.shape[0], - num_epochs=2000, - shuffle=False) - test_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x=x_samples, y=training_y, shuffle=False) - feature_columns = [ - tf.feature_column.numeric_column('x0'), - tf.feature_column.numeric_column('x1'), - ] - - def init_fn(): - return keypoints_initialization.uniform_keypoints_for_signal( - 2, 0., 1., 0., 1.) - - hparams = tfl_hparams.CalibratedRtlHParams(['x0', 'x1'], - num_keypoints=2, - num_lattices=3, - lattice_rank=2, - learning_rate=0.1, - missing_input_value=-1.) - hparams.set_feature_param('x0', 'missing_vertex', True) - - estimator = scrtl.separately_calibrated_rtl_regressor( - feature_columns=feature_columns, - hparams=hparams, - keypoints_initializers_fn=init_fn) - - estimator.train(input_fn=train_input_fn) - results = estimator.evaluate(input_fn=test_input_fn) - self.assertLess(results['average_loss'], 0.1) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_lattice/python/estimators_test.py b/tensorflow_lattice/python/estimators_test.py new file mode 100644 index 0000000..13b871b --- /dev/null +++ b/tensorflow_lattice/python/estimators_test.py @@ -0,0 +1,715 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +"""Tests TFL canned estimators.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import logging +from absl.testing import parameterized +import numpy as np +import pandas as pd +from sklearn.datasets import load_boston +import tensorflow as tf +from tensorflow import feature_column as fc +from tensorflow_lattice.python import configs +from tensorflow_lattice.python import estimators +from tensorflow_lattice.python import model_info +from tensorflow_estimator.python.estimator.head import regression_head + + +class CannedEstimatorsTest(parameterized.TestCase, tf.test.TestCase): + + def setUp(self): + super(CannedEstimatorsTest, self).setUp() + self.eps = 0.001 + + # UCI Statlog (Heart) dataset. + heart_csv_file = tf.keras.utils.get_file( + 'heart.csv', 'http://storage.googleapis.com/applied-dl/heart.csv') + heart_df = pd.read_csv(heart_csv_file) + heart_target = heart_df.pop('target') + heart_train_size = int(len(heart_df) * 0.8) + self.heart_train_x = heart_df[:heart_train_size] + self.heart_train_y = heart_target[:heart_train_size] + self.heart_test_x = heart_df[heart_train_size:] + self.heart_test_y = heart_target[heart_train_size:] + + # Feature columns. + # - age + # - sex + # - cp chest pain type (4 values) + # - trestbps resting blood pressure + # - chol serum cholestoral in mg/dl + # - fbs fasting blood sugar > 120 mg/dl + # - restecg resting electrocardiographic results (values 0,1,2) + # - thalach maximum heart rate achieved + # - exang exercise induced angina + # - oldpeak ST depression induced by exercise relative to rest + # - slope the slope of the peak exercise ST segment + # - ca number of major vessels (0-3) colored by flourosopy + # - thal 3 = normal; 6 = fixed defect; 7 = reversable defect + self.heart_feature_columns = [ + fc.numeric_column('age', default_value=-1), + fc.categorical_column_with_vocabulary_list('sex', [0, 1]), + fc.numeric_column('cp'), + fc.numeric_column('trestbps', default_value=-1), + fc.numeric_column('chol'), + fc.categorical_column_with_vocabulary_list('fbs', [0, 1]), + fc.categorical_column_with_vocabulary_list('restecg', [0, 1, 2]), + fc.numeric_column('thalach'), + fc.categorical_column_with_vocabulary_list('exang', [0, 1]), + fc.numeric_column('oldpeak'), + fc.categorical_column_with_vocabulary_list('slope', [0, 1, 2]), + fc.numeric_column('ca'), + fc.categorical_column_with_vocabulary_list( + 'thal', ['normal', 'fixed', 'reversible']), + ] + + # Feature configs. Each model can pick and choose which features to use. 
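+ # Monotonicity can be given as +1/-1, as 'increasing'/'decreasing', or, for + # categorical features, as a list of (smaller, larger) value pairs.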
+ self.heart_feature_configs = [ + configs.FeatureConfig( + name='age', + lattice_size=3, + pwl_calibration_num_keypoints=5, + monotonicity=1, + pwl_calibration_clip_max=100, + ), + configs.FeatureConfig( + name='cp', + pwl_calibration_num_keypoints=4, + pwl_calibration_input_keypoints='uniform', + monotonicity='increasing', + ), + configs.FeatureConfig( + name='chol', + pwl_calibration_input_keypoints=[126.0, 210.0, 247.0, 286.0, 564.0], + monotonicity=1, + pwl_calibration_clip_min=130, + pwl_calibration_clamp_min=True, + pwl_calibration_clamp_max=True, + regularizer_configs=[ + configs.RegularizerConfig(name='calib_hessian', l2=1e-4), + ], + ), + configs.FeatureConfig( + name='fbs', + monotonicity=[(0, 1)], + ), + configs.FeatureConfig( + name='trestbps', + pwl_calibration_num_keypoints=5, + monotonicity='decreasing', + ), + configs.FeatureConfig( + name='thalach', + pwl_calibration_num_keypoints=5, + monotonicity=-1, + ), + configs.FeatureConfig( + name='restecg', + monotonicity=[(0, 1), (0, 2)], + ), + configs.FeatureConfig( + name='exang', + monotonicity=[(0, 1)], + ), + configs.FeatureConfig( + name='oldpeak', + pwl_calibration_num_keypoints=5, + monotonicity=1, + ), + configs.FeatureConfig( + name='slope', + monotonicity=[(0, 1), (1, 2)], + ), + configs.FeatureConfig( + name='ca', + pwl_calibration_num_keypoints=4, + monotonicity='increasing', + ), + configs.FeatureConfig( + name='thal', + monotonicity=[('normal', 'fixed'), ('normal', 'reversible')], + ), + ] + + # UCI Boston dataset. + boston_dataset = load_boston() + boston_df = pd.DataFrame( + boston_dataset.data, columns=boston_dataset.feature_names) + boston_df['CHAS'] = boston_df['CHAS'].astype(np.int32) + boston_target = pd.Series(boston_dataset.target) + boston_train_size = int(len(boston_df) * 0.8) + self.boston_train_x = boston_df[:boston_train_size] + self.boston_train_y = boston_target[:boston_train_size] + self.boston_test_x = boston_df[boston_train_size:] + self.boston_test_y = boston_target[boston_train_size:] + + # Feature columns. + # - CRIM per capita crime rate by town + # - ZN proportion of residential land zoned for lots over 25,000 sq.ft + # - INDUS proportion of non-retail business acres per town + # - CHAS Charles River dummy variable (= 1 if tract bounds river) + # - NOX nitric oxides concentration (parts per 10 million) + # - RM average number of rooms per dwelling + # - AGE proportion of owner-occupied units built prior to 1940 + # - DIS weighted distances to five Boston employment centres + # - RAD index of accessibility to radial highways + # - TAX full-value property-tax rate per $10,000 + # - PTRATIO pupil-teacher ratio by town + # - B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town + # - LSTAT % lower status of the population + # - Target Median value of owner-occupied homes in $1000's + self.boston_feature_columns = [ + fc.numeric_column('CRIM'), + fc.numeric_column('ZN'), + fc.numeric_column('INDUS'), + fc.categorical_column_with_vocabulary_list('CHAS', [0, 1]), + fc.numeric_column('NOX'), + fc.numeric_column('RM'), + fc.numeric_column('AGE'), + fc.numeric_column('DIS'), + fc.numeric_column('RAD'), + fc.numeric_column('TAX'), + fc.numeric_column('PTRATIO'), + fc.numeric_column('B'), + fc.numeric_column('LSTAT'), + ] + + # Feature configs. Each model can pick and choose which features to use. 
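+ # Besides monotonicity, these configs exercise convexity/concavity, + # unimodality, trust (reflects_trust_in) and monotonic dominance (dominates).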
+ self.boston_feature_configs = [ + configs.FeatureConfig( + name='CRIM', + lattice_size=3, + monotonicity=-1, + pwl_calibration_convexity=1, + ), + configs.FeatureConfig( + name='ZN', + pwl_calibration_input_keypoints=[0.0, 25.0, 50.0, 75.0, 100.0], + monotonicity=1, + reflects_trust_in=[ + configs.TrustConfig(feature_name='RM', trust_type='trapezoid'), + ], + ), + configs.FeatureConfig( + name='INDUS', + pwl_calibration_input_keypoints='uniform', + pwl_calibration_always_monotonic=False, + reflects_trust_in=[ + configs.TrustConfig(feature_name='RM', + trust_type='edgeworth', + direction='negative'), + ], + regularizer_configs=[ + configs.RegularizerConfig(name='calib_wrinkle', l2=1e-4), + ], + ), + configs.FeatureConfig(name='CHAS',), + configs.FeatureConfig(name='NOX',), + configs.FeatureConfig( + name='RM', + monotonicity='increasing', + pwl_calibration_convexity='concave', + ), + configs.FeatureConfig( + name='AGE', + monotonicity=-1, + ), + configs.FeatureConfig( + name='DIS', + lattice_size=3, + unimodality=1, + ), + configs.FeatureConfig(name='RAD',), + configs.FeatureConfig(name='TAX',), + configs.FeatureConfig( + name='PTRATIO', + monotonicity='decreasing', + ), + configs.FeatureConfig(name='B',), + configs.FeatureConfig( + name='LSTAT', + monotonicity=-1, + dominates=[ + configs.DominanceConfig(feature_name='AGE', + dominance_type='monotonic'), + ], + ), + ] + + def _ResetAllBackends(self): + tf.keras.backend.clear_session() + tf.compat.v1.reset_default_graph() + + def _GetInputFn(self, x, y, num_epochs=1, batch_size=100): + return tf.compat.v1.estimator.inputs.pandas_input_fn( + x=x, + y=y, + batch_size=batch_size, + shuffle=False, + num_epochs=num_epochs, + num_threads=1) + + def _GetHeartTrainInputFn(self, **kwargs): + return self._GetInputFn(self.heart_train_x, self.heart_train_y, **kwargs) + + def _GetHeartTestInputFn(self, **kwargs): + return self._GetInputFn( + self.heart_test_x, self.heart_test_y, num_epochs=1, **kwargs) + + def _GetBostonTrainInputFn(self, **kwargs): + return self._GetInputFn(self.boston_train_x, self.boston_train_y, **kwargs) + + def _GetBostonTestInputFn(self, **kwargs): + return self._GetInputFn( + self.boston_test_x, self.boston_test_y, num_epochs=1, **kwargs) + + @parameterized.parameters( + ([ + 'age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg', 'thalach', + 'exang', 'oldpeak', 'slope', 'ca', 'thal' + ], [['sex', 'oldpeak'], ['fbs', 'thalach'], ['thalach', 'thal'], + ['cp', 'trestbps'], ['age', 'ca', 'chol'] + ], None, None, False, True, 0.8), + ([ + 'age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg', 'thalach', + 'exang', 'oldpeak', 'slope', 'ca', 'thal' + ], 'random', 6, 5, True, False, 0.85), + ([ + 'age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg', 'thalach', + 'exang', 'oldpeak', 'slope', 'ca', 'thal' + ], 'crystals', 6, 5, True, False, 0.85), + ) + def testCalibratedLatticeEnsembleClassifier(self, feature_names, lattices, + num_lattices, lattice_rank, + separate_calibrators, + output_calibration, auc): + self._ResetAllBackends() + feature_columns = [ + feature_column for feature_column in self.heart_feature_columns + if feature_column.name in feature_names + ] + feature_configs = [ + feature_config for feature_config in self.heart_feature_configs + if feature_config.name in feature_names + ] + model_config = configs.CalibratedLatticeEnsembleConfig( + regularizer_configs=[ + configs.RegularizerConfig(name='torsion', l2=1e-4), + configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4), + ], + 
feature_configs=feature_configs, + lattices=lattices, + num_lattices=num_lattices, + lattice_rank=lattice_rank, + separate_calibrators=separate_calibrators, + output_calibration=output_calibration, + ) + estimator = estimators.CannedClassifier( + feature_columns=feature_columns, + model_config=model_config, + feature_analysis_input_fn=self._GetHeartTrainInputFn(num_epochs=1), + prefitting_input_fn=self._GetHeartTrainInputFn(num_epochs=50), + optimizer=tf.keras.optimizers.Adam(0.01), + prefitting_optimizer=tf.keras.optimizers.Adam(0.01)) + estimator.train(input_fn=self._GetHeartTrainInputFn(num_epochs=200)) + results = estimator.evaluate(input_fn=self._GetHeartTestInputFn()) + logging.info('Calibrated lattice ensemble classifier results:') + logging.info(results) + self.assertGreater(results['auc'], auc) + + @parameterized.parameters( + (['age', 'sex', 'fbs', 'restecg', 'ca', 'thal'], False, 0.75), + (['age', 'cp', 'chol', 'slope', 'ca', 'thal'], False, 0.8), + (['trestbps', 'thalach', 'exang', 'oldpeak', 'thal'], True, 0.8), + ) + def testCalibratedLatticeClassifier(self, feature_names, output_calibration, + auc): + self._ResetAllBackends() + feature_columns = [ + feature_column for feature_column in self.heart_feature_columns + if feature_column.name in feature_names + ] + feature_configs = [ + feature_config for feature_config in self.heart_feature_configs + if feature_config.name in feature_names + ] + model_config = configs.CalibratedLatticeConfig( + regularizer_configs=[ + configs.RegularizerConfig(name='torsion', l2=1e-4), + configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4), + ], + output_calibration=output_calibration, + feature_configs=feature_configs) + estimator = estimators.CannedClassifier( + feature_columns=feature_columns, + model_config=model_config, + feature_analysis_input_fn=self._GetHeartTrainInputFn(num_epochs=1), + optimizer=tf.keras.optimizers.Adam(0.01)) + estimator.train(input_fn=self._GetHeartTrainInputFn(num_epochs=200)) + results = estimator.evaluate(input_fn=self._GetHeartTestInputFn()) + logging.info('Calibrated lattice classifier results:') + logging.info(results) + self.assertGreater(results['auc'], auc) + + @parameterized.parameters( + (['age', 'sex', 'fbs', 'restecg', 'ca', 'thal'], False, False, 0.7), + ([ + 'age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg', 'thalach', + 'exang', 'oldpeak', 'slope', 'ca', 'thal' + ], True, True, 0.8), + ) + def testCalibratedLinearClassifier(self, feature_names, output_calibration, + use_bias, auc): + self._ResetAllBackends() + feature_columns = [ + feature_column for feature_column in self.heart_feature_columns + if feature_column.name in feature_names + ] + feature_configs = [ + feature_config for feature_config in self.heart_feature_configs + if feature_config.name in feature_names + ] + model_config = configs.CalibratedLinearConfig( + use_bias=use_bias, + regularizer_configs=[ + configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4), + ], + output_calibration=output_calibration, + feature_configs=feature_configs) + estimator = estimators.CannedClassifier( + feature_columns=feature_columns, + model_config=model_config, + feature_analysis_input_fn=self._GetHeartTrainInputFn(num_epochs=1), + optimizer=tf.keras.optimizers.Adam(0.01)) + estimator.train(input_fn=self._GetHeartTrainInputFn(num_epochs=200)) + results = estimator.evaluate(input_fn=self._GetHeartTestInputFn()) + logging.info('Calibrated linear classifier results:') + logging.info(results) + self.assertGreater(results['auc'], auc) + 
+ @parameterized.parameters( + ([ + 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', + 'TAX', 'PTRATIO', 'B', 'LSTAT' + ], [['CRIM', 'ZN', 'RAD', 'DIS'], ['PTRATIO', 'LSTAT', 'ZN', 'RM'], + ['AGE', 'NOX', 'B'], ['INDUS', 'NOX', 'PTRATIO'], ['TAX', 'CHAS'], + ['CRIM', 'INDUS', 'AGE', 'RM', 'CHAS'] + ], None, None, False, True, 60.0), + ([ + 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', + 'TAX', 'PTRATIO', 'B', 'LSTAT' + ], 'random', 6, 5, True, False, 50.0), + ([ + 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', + 'TAX', 'PTRATIO', 'B', 'LSTAT' + ], 'crystals', 6, 5, True, False, 50.0), + ) + def testCalibratedLatticeEnsembleRegressor(self, feature_names, lattices, + num_lattices, lattice_rank, + separate_calibrators, + output_calibration, average_loss): + self._ResetAllBackends() + feature_columns = [ + feature_column for feature_column in self.boston_feature_columns + if feature_column.name in feature_names + ] + feature_configs = [ + feature_config for feature_config in self.boston_feature_configs + if feature_config.name in feature_names + ] + model_config = configs.CalibratedLatticeEnsembleConfig( + regularizer_configs=[ + configs.RegularizerConfig(name='torsion', l2=1e-5), + configs.RegularizerConfig(name='output_calib_hessian', l2=1e-5), + ], + feature_configs=feature_configs, + lattices=lattices, + num_lattices=num_lattices, + lattice_rank=lattice_rank, + separate_calibrators=separate_calibrators, + output_calibration=output_calibration, + ) + estimator = estimators.CannedRegressor( + feature_columns=feature_columns, + model_config=model_config, + feature_analysis_input_fn=self._GetBostonTrainInputFn(num_epochs=1), + prefitting_input_fn=self._GetBostonTrainInputFn(num_epochs=50), + optimizer=tf.keras.optimizers.Adam(0.05), + prefitting_optimizer=tf.keras.optimizers.Adam(0.05)) + estimator.train(input_fn=self._GetBostonTrainInputFn(num_epochs=200)) + results = estimator.evaluate(input_fn=self._GetBostonTestInputFn()) + logging.info('Calibrated lattice ensemble regressor results:') + logging.info(results) + self.assertLess(results['average_loss'], average_loss) + + @parameterized.parameters( + (['CRIM', 'ZN', 'RM', 'DIS', 'PTRATIO', 'LSTAT'], False, 40.0), + (['CRIM', 'INDUS', 'CHAS', 'NOX', 'AGE', 'RAD', 'TAX', 'B'], True, 40.0), + (['CRIM', 'INDUS', 'LSTAT', 'NOX', 'AGE', 'RAD', 'TAX', 'B'], True, 40.0), + ) + def testCalibratedLatticeRegressor(self, feature_names, output_calibration, + average_loss): + self._ResetAllBackends() + feature_columns = [ + feature_column for feature_column in self.boston_feature_columns + if feature_column.name in feature_names + ] + feature_configs = [ + feature_config for feature_config in self.boston_feature_configs + if feature_config.name in feature_names + ] + model_config = configs.CalibratedLinearConfig( + regularizer_configs=[ + configs.RegularizerConfig(name='torsion', l2=1e-4), + configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4), + ], + output_calibration=output_calibration, + feature_configs=feature_configs) + estimator = estimators.CannedRegressor( + feature_columns=feature_columns, + model_config=model_config, + feature_analysis_input_fn=self._GetBostonTrainInputFn(num_epochs=1), + optimizer=tf.keras.optimizers.Adam(0.01)) + estimator.train(input_fn=self._GetBostonTrainInputFn(num_epochs=200)) + results = estimator.evaluate(input_fn=self._GetBostonTestInputFn()) + logging.info('Calibrated lattice regressor results:') + logging.info(results) + 
self.assertLess(results['average_loss'], average_loss) + + @parameterized.parameters( + (['CRIM', 'ZN', 'RM', 'DIS', 'PTRATIO', 'LSTAT'], False, False, 40.0), + ([ + 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', + 'TAX', 'PTRATIO', 'B', 'LSTAT' + ], True, True, 40.0), + ) + def testCalibratedLinearRegressor(self, feature_names, output_calibration, + use_bias, average_loss): + self._ResetAllBackends() + feature_columns = [ + feature_column for feature_column in self.boston_feature_columns + if feature_column.name in feature_names + ] + feature_configs = [ + feature_config for feature_config in self.boston_feature_configs + if feature_config.name in feature_names + ] + model_config = configs.CalibratedLinearConfig( + use_bias=use_bias, + regularizer_configs=[ + configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4), + ], + output_calibration=output_calibration, + feature_configs=feature_configs) + estimator = estimators.CannedRegressor( + feature_columns=feature_columns, + model_config=model_config, + feature_analysis_input_fn=self._GetBostonTrainInputFn(num_epochs=1), + optimizer=tf.keras.optimizers.Adam(0.01)) + estimator.train(input_fn=self._GetBostonTrainInputFn(num_epochs=200)) + results = estimator.evaluate(input_fn=self._GetBostonTestInputFn()) + logging.info('Calibrated linear regressor results:') + logging.info(results) + self.assertLess(results['average_loss'], average_loss) + + @parameterized.parameters( + (['CRIM', 'ZN', 'RM', 'DIS', 'PTRATIO', 'LSTAT'], False, False, 40.0), + ([ + 'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', + 'TAX', 'PTRATIO', 'B', 'LSTAT' + ], True, True, 40.0), + ) + def testCalibratedLinearEstimator(self, feature_names, output_calibration, + use_bias, average_loss): + self._ResetAllBackends() + feature_columns = [ + feature_column for feature_column in self.boston_feature_columns + if feature_column.name in feature_names + ] + feature_configs = [ + feature_config for feature_config in self.boston_feature_configs + if feature_config.name in feature_names + ] + model_config = configs.CalibratedLinearConfig( + use_bias=use_bias, + regularizer_configs=[ + configs.RegularizerConfig(name='output_calib_hessian', l2=1e-4), + ], + output_calibration=output_calibration, + feature_configs=feature_configs) + estimator = estimators.CannedEstimator( + head=regression_head.RegressionHead(), + feature_columns=feature_columns, + model_config=model_config, + feature_analysis_input_fn=self._GetBostonTrainInputFn(num_epochs=1), + optimizer=tf.keras.optimizers.Adam(0.01)) + estimator.train(input_fn=self._GetBostonTrainInputFn(num_epochs=200)) + results = estimator.evaluate(input_fn=self._GetBostonTestInputFn()) + logging.info('Calibrated linear estimator results:') + logging.info(results) + self.assertLess(results['average_loss'], average_loss) + + @parameterized.parameters( + (5, 6, False, True), + (4, 5, True, False), + ) + def testCalibratedLatticeEnsembleModelInfo(self, num_lattices, lattice_rank, + separate_calibrators, + output_calibration): + self._ResetAllBackends() + model_config = configs.CalibratedLatticeEnsembleConfig( + feature_configs=self.heart_feature_configs, + num_lattices=num_lattices, + lattice_rank=lattice_rank, + separate_calibrators=separate_calibrators, + output_calibration=output_calibration, + ) + estimator = estimators.CannedClassifier( + feature_columns=self.heart_feature_columns, + model_config=model_config, + feature_analysis_input_fn=self._GetHeartTrainInputFn(num_epochs=1), +
prefitting_input_fn=self._GetHeartTrainInputFn(num_epochs=5), + optimizer=tf.keras.optimizers.Adam(0.01), + prefitting_optimizer=tf.keras.optimizers.Adam(0.01)) + estimator.train(input_fn=self._GetHeartTrainInputFn(num_epochs=20)) + + # Serving input fn is used to create saved models. + serving_input_fn = ( + tf.estimator.export.build_parsing_serving_input_receiver_fn( + feature_spec=fc.make_parse_example_spec(self.heart_feature_columns)) + ) + saved_model_path = estimator.export_saved_model(estimator.model_dir, + serving_input_fn) + logging.info('Model exported to %s', saved_model_path) + model = estimators.get_model_graph(saved_model_path) + + expected_num_nodes = ( + len(self.heart_feature_columns) + # Input features + num_lattices + # One lattice per submodel + 1 + # Averaging submodels + int(output_calibration)) # Output calibration + if separate_calibrators: + expected_num_nodes += num_lattices * lattice_rank + else: + expected_num_nodes += len(self.heart_feature_columns) + + self.assertLen(model.nodes, expected_num_nodes) + + @parameterized.parameters( + (['ZN', 'INDUS', 'RM'], 'random', 3, 1, + [['ZN', 'RM'], ['RM'], ['INDUS']]), + (['ZN', 'INDUS', 'RM'], 'crystals', 3, 1, + [['RM'], ['INDUS'], ['ZN', 'RM']]), + (['RM', 'LSTAT', 'AGE'], 'crystals', 3, 1, + [['LSTAT'], ['LSTAT', 'AGE'], ['RM']]), + ) + def testCalibratedLatticeEnsembleFix2dConstraintViolations(self, + feature_names, + lattices, + num_lattices, + lattice_rank, + expected_lattices): + self._ResetAllBackends() + feature_columns = [ + feature_column for feature_column in self.boston_feature_columns + if feature_column.name in feature_names + ] + feature_configs = [ + feature_config for feature_config in self.boston_feature_configs + if feature_config.name in feature_names + ] + + model_config = configs.CalibratedLatticeEnsembleConfig( + feature_configs=feature_configs, + lattices=lattices, + num_lattices=num_lattices, + lattice_rank=lattice_rank, + ) + estimator = estimators.CannedRegressor( + feature_columns=feature_columns, + model_config=model_config, + feature_analysis_input_fn=self._GetBostonTrainInputFn(num_epochs=1), + prefitting_input_fn=self._GetBostonTrainInputFn(num_epochs=50), + optimizer=tf.keras.optimizers.Adam(0.05), + prefitting_optimizer=tf.keras.optimizers.Adam(0.05)) + estimator.train(input_fn=self._GetBostonTrainInputFn(num_epochs=200)) + + # Serving input fn is used to create saved models. 
+ serving_input_fn = ( + tf.estimator.export.build_parsing_serving_input_receiver_fn( + feature_spec=fc.make_parse_example_spec(feature_columns)) + ) + saved_model_path = estimator.export_saved_model(estimator.model_dir, + serving_input_fn) + logging.info('Model exported to %s', saved_model_path) + model = estimators.get_model_graph(saved_model_path) + lattices = [] + for node in model.nodes: + if isinstance(node, model_info.LatticeNode): + lattices.append([input_node.input_node.name + for input_node in node.input_nodes]) + + self.assertLen(lattices, len(expected_lattices)) + for lattice, expected_lattice in zip(lattices, expected_lattices): + self.assertCountEqual(lattice, expected_lattice) + + @parameterized.parameters( + ('linear', True), + ('lattice', False), + ) + def testCalibratedModelInfo(self, model_type, output_calibration): + self._ResetAllBackends() + if model_type == 'linear': + model_config = configs.CalibratedLinearConfig( + feature_configs=self.heart_feature_configs, + output_calibration=output_calibration, + ) + else: + model_config = configs.CalibratedLatticeConfig( + feature_configs=self.heart_feature_configs, + output_calibration=output_calibration, + ) + estimator = estimators.CannedClassifier( + feature_columns=self.heart_feature_columns, + model_config=model_config, + feature_analysis_input_fn=self._GetHeartTrainInputFn(num_epochs=1), + prefitting_input_fn=self._GetHeartTrainInputFn(num_epochs=5), + optimizer=tf.keras.optimizers.Adam(0.01), + prefitting_optimizer=tf.keras.optimizers.Adam(0.01)) + estimator.train(input_fn=self._GetHeartTrainInputFn(num_epochs=20)) + + # Serving input fn is used to create saved models. + serving_input_fn = ( + tf.estimator.export.build_parsing_serving_input_receiver_fn( + feature_spec=fc.make_parse_example_spec(self.heart_feature_columns)) + ) + saved_model_path = estimator.export_saved_model(estimator.model_dir, + serving_input_fn) + logging.info('Model exported to %s', saved_model_path) + model = estimators.get_model_graph(saved_model_path) + + expected_num_nodes = ( + 2 * len(self.heart_feature_columns) + # Input features and calibration + 1 + # Linear or lattice layer + int(output_calibration)) # Output calibration + + self.assertLen(model.nodes, expected_num_nodes) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensorflow_lattice/python/kernel_tests/BUILD b/tensorflow_lattice/python/kernel_tests/BUILD deleted file mode 100644 index 1d98263..0000000 --- a/tensorflow_lattice/python/kernel_tests/BUILD +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -licenses(["notice"]) # Apache 2.0 - -package( - default_visibility = [ - "//tensorflow_lattice:__subpackages__", - ], -) - -py_test( - name = "pwl_calibration_test", - size = "medium", - srcs = ["pwl_calibration_test.py"], - python_version = "PY2", - srcs_version = "PY2AND3", - deps = [ - "@org_tensorflow//tensorflow:tensorflow_py", - "//tensorflow_lattice/python:pwl_calibration_ops_py", - ], -) - -py_test( - name = "lattice_gradient_test", - size = "medium", - srcs = ["lattice_gradient_test.py"], - python_version = "PY2", - srcs_version = "PY2AND3", - deps = [ - "@org_tensorflow//third_party/py/numpy", - "@org_tensorflow//tensorflow:tensorflow_py", - "//tensorflow_lattice/python:lattice_ops_py", - ], -) diff --git a/tensorflow_lattice/python/kernel_tests/lattice_gradient_test.py b/tensorflow_lattice/python/kernel_tests/lattice_gradient_test.py deleted file mode 100644 index 9e8220b..0000000 --- a/tensorflow_lattice/python/kernel_tests/lattice_gradient_test.py +++ /dev/null @@ -1,222 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for hypercube interpolation gradient.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf - -from tensorflow_lattice.python.ops import lattice_ops - - -class LatticeGradientOpTest(tf.test.TestCase): - - def _testGradient(self, x_value_list, x_shape, lattice_sizes, y_shape, - is_hypercube): - """Compute the numerical gradients, and check the error.""" - for x_value in x_value_list: - with self.session(use_gpu=False): - x = tf.compat.v1.placeholder(dtype=tf.float32, shape=x_shape, name="x") - x_init_value = np.asarray(x_value, dtype=np.float32) - if is_hypercube: - y = lattice_ops.hypercube_interpolation( - x, lattice_sizes=lattice_sizes) - else: - y = lattice_ops.simplex_interpolation(x, lattice_sizes=lattice_sizes) - error = tf.compat.v1.test.compute_gradient_error( - x, x_shape, y, y_shape, x_init_value=x_init_value) - tf.compat.v1.logging.info("x_init_value = %s" % x_init_value) - tf.compat.v1.logging.info("x error = %f", error) - self.assertLess(error, 1e-4) - - def _testGradientWith1DInput(self, is_hypercube): - x_value_list = [[[-1.0]], [[0.1]], [[0.5]], [[1.001]], [[1.5]], [[2.001]], - [[3.0]]] - x_shape = (1, 1) - lattice_sizes = [3] - # interpolation_weight_size = 3. 
- y_shape = (1, 3) - self._testGradient( - x_value_list, - x_shape, - lattice_sizes, - y_shape, - is_hypercube=is_hypercube) - - def testHypercubeGradientWith1DInput(self): - self._testGradientWith1DInput(is_hypercube=True) - - def testSimplexGradientWith1DInput(self): - self._testGradientWith1DInput(is_hypercube=False) - - def _testGradientWith2DInput(self, is_hypercube): - x_value_list = [[[-1.0, 1.1]], [[0.1, 0.09]], [[0.5, 2.3]], [[1.001, 0.98]], - [[1.5, 0.34]], [[2.001, 10.0]], [[3.0, 0.5]]] - x_shape = (1, 2) - lattice_sizes = [3, 2] - # interpolation_weight_size = 6. - y_shape = (1, 6) - self._testGradient( - x_value_list, - x_shape, - lattice_sizes, - y_shape, - is_hypercube=is_hypercube) - - def testHypercubeGradientWith2DInput(self): - self._testGradientWith2DInput(is_hypercube=True) - - def testSimplexGradientWith2DInput(self): - self._testGradientWith2DInput(is_hypercube=False) - - def _testGradientWith3DInput(self, is_hypercube): - x_value_list = [[[-1.0, 1.1, 2.11]], [[0.1, 0.099, 0.111]], - [[0.5, 2.3, 2.212]], [[1.001, 0.98, 0.123]], - [[1.5, 0.34, 0.3312]], [[2.001, 10.0, 9.0]], - [[3.0, 0.5, -1.22]]] - x_shape = (1, 3) - lattice_sizes = [3, 3, 5] - # interpolation_weight_size = 45. - y_shape = (1, 45) - self._testGradient(x_value_list, x_shape, lattice_sizes, y_shape, - is_hypercube) - - def testHypercubeGradientWith3DInput(self): - self._testGradientWith3DInput(is_hypercube=True) - - def testSimplexGradientWith3DInput(self): - self._testGradientWith3DInput(is_hypercube=False) - - def testSimplexGradientWith3DBatchInput(self): - x_value_list = [[[0.5, 0.1, 0.3], [0.11, 0.3, 0.79], [0.33, 0.5, 0.79]]] - x_shape = (3, 3) - lattice_sizes = [2, 2, 2] - # interpolation_weight_size = 8. - y_shape = (3, 8) - self._testGradient( - x_value_list, x_shape, lattice_sizes, y_shape, is_hypercube=False) - - -class LatticeGradientBoundaryTest(tf.test.TestCase): - - def _testGradient(self, inputs, weights, expected_jacobians_wrt_input, - lattice_sizes, is_hypercube): - """Compute the grad_wrt_input and compare it with expected_grad_wrt_input. - - Args: - inputs: a 2D array (or numpy array) contains the test inputs. Its shape - should be num_examples x input_size. - weights: a 2D array (or numpy array) contains the test weights. Its shape - should be num_examples x weight_size. - expected_jacobians_wrt_input: 3D array (or numpy) contains a transpoed - jacobian matrix that contains dweight/dinput with shape (num_examples, - weight_size, input_size). In other words, - expected_jacobians_wrt_input[num][ii][jj] == - dweight[num][jj]/dinput[num][ii], where num means the current example. - lattice_sizes: A list of lattice_sizes. - is_hypercube: If true, hypercube gradient is tested, otherwise simplex - gradient is tested. - Returns: None - Raises: Fails if computed jacobian_wrt_inputs != expected_jacobian_wrt_inpu. - """ - - # Number of test examples in inputs. - num_examples = len(inputs) - weight_size = len(weights[0]) - - # Define the grad_wrt_input_tensor. 
- with tf.Graph().as_default(): - input_tensor = tf.constant(inputs, dtype=tf.float32) - weight_tensor = tf.constant(weights, dtype=tf.float32) - grad_wrt_weight_tensor = tf.compat.v1.placeholder( - dtype=tf.float32, shape=(num_examples, weight_size)) - - if is_hypercube: - grad_wrt_input_tensor = lattice_ops.hypercube_gradient( - input_tensor, weight_tensor, grad_wrt_weight_tensor, lattice_sizes) - else: - grad_wrt_input_tensor = lattice_ops.simplex_gradient( - input_tensor, weight_tensor, grad_wrt_weight_tensor, lattice_sizes) - - # Compute the Jacobian. - with self.session(use_gpu=False): - tf.compat.v1.logging.info("input = %s " % inputs) - tf.compat.v1.logging.info("weight = %s " % weights) - # num_examples x weight_size x input_size tensor. - jacobians_wrt_input = [] - # Compute dweight[cnt] / dinput. - for cnt in range(weight_size): - grad_wrt_weight = [0.] * weight_size - grad_wrt_weight[cnt] = 1.0 - grad_wrt_weights = [grad_wrt_weight for _ in range(num_examples)] - tf.compat.v1.logging.info("grad_wrt_weights = %s " % grad_wrt_weights) - # num_examples x input_size matrix. - grad_weight_wrt_inputs = grad_wrt_input_tensor.eval( - feed_dict={grad_wrt_weight_tensor: grad_wrt_weights}) - tf.compat.v1.logging.info("grad_wrt_inputs = %s " % - grad_weight_wrt_inputs) - jacobians_wrt_input.append(grad_weight_wrt_inputs) - tf.compat.v1.logging.info("jacobian_wrt_inputs = %s " % - jacobians_wrt_input) - tf.compat.v1.logging.info("expected_jacobian_wrt_inputs = %s" % - expected_jacobians_wrt_input) - self.assertAllClose(jacobians_wrt_input, expected_jacobians_wrt_input) - - def _test1DLatticeInputAtBoundary(self, is_hypercube): - # 1D lattice. - lattice_sizes = [4] - # Values at the boundaries. - inputs = [[-1.0], [0.0], [1.0], [2.0], [3.0], [4.0]] - # Interpolation weights and grad_wrt_weights. - weights = [[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], - [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0]] - # Total 6 test points. So expected_jacobian_wrt_input = 6 x 4 x 1 matrix, - # where iith row contains dweight[ii]/dinput[0]. - # Jacobain for the input -1.0: - # [0, 0, 0, 0]. - # Jacobian for the input, 0.0: - # [-1, 1, 0, 0] - # Jacobian for the input, 1.0: - # [0, -1, 1, 0] - # Jacobian for the input, 2.0: - # [0, 0, -1, 1] - # Jacobian for the input, 3.0: - # [0, 0, -1, 1] - # Jacobian for the input, 4.0: - # [0, 0, 0, 0] - expected_jacobian_wrt_input = [[[0], [-1], [0], [0], [0], [0]], - [[0], [1], [-1], [0], [0], [0]], - [[0], [0], [1], [-1], [-1], [0]], - [[0], [0], [0], [1], [1], [0]]] - - self._testGradient( - inputs, - weights, - expected_jacobian_wrt_input, - lattice_sizes, - is_hypercube=is_hypercube) - - def testHypercubeWith1DLatticeInputAtBoundary(self): - self._test1DLatticeInputAtBoundary(is_hypercube=True) - - def testSimplexWith1DLatticeInputAtBoundary(self): - self._test1DLatticeInputAtBoundary(is_hypercube=False) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensorflow_lattice/python/kernel_tests/pwl_calibration_test.py b/tensorflow_lattice/python/kernel_tests/pwl_calibration_test.py deleted file mode 100644 index 9090154..0000000 --- a/tensorflow_lattice/python/kernel_tests/pwl_calibration_test.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for piecewise-linear calibration gradient.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf - -from tensorflow_lattice.python.ops import pwl_calibration_ops - -_MAX_ABSOLUTE_NUMERIC_ERROR = 1e-4 - - -class PWLCalibrationOpsTest(tf.test.TestCase): - - def _testInBetweenGradients(self, kp_inputs): - """Compares numerical with the calculated gradient and checks the error.""" - # Create batch with all values in between the keypoints inputs. - x_values = [] - for ii in range(len(kp_inputs) - 1): - x_values += [(kp_inputs[ii] + kp_inputs[ii + 1]) / 2] - x_values = np.asarray(x_values, dtype=np.float32) - - tf.compat.v1.logging.info("kp_inputs = %s" % kp_inputs) - tf.compat.v1.logging.info("x_values = %s" % x_values) - with tf.Graph().as_default(): - with self.session(use_gpu=False): - x_shape = [x_values.size] - x = tf.compat.v1.placeholder(dtype=np.float32, shape=x_shape, name="x") - y_shape = [x_values.size, len(kp_inputs)] - - # Dense version. - y_dense = pwl_calibration_ops.pwl_indexing_calibrator( - input=x, kp_inputs=tf.constant(kp_inputs, dtype=tf.float32)) - y_dense_values = y_dense.eval(feed_dict={x: x_values}) - tf.compat.v1.logging.info("y_dense=%s" % (y_dense_values,)) - dense_error = tf.compat.v1.test.compute_gradient_error( - x, x_shape, y_dense, y_shape, x_init_value=x_values) - tf.compat.v1.logging.info("dense_error = %f" % dense_error) - self.assertLess(dense_error, _MAX_ABSOLUTE_NUMERIC_ERROR) - - # Sparse version. - sparse_indices, sparse_weights = ( - pwl_calibration_ops.pwl_indexing_calibrator_sparse( - input=x, kp_inputs=tf.constant(kp_inputs, dtype=tf.float32))) - y_sparse = tf.sparse.to_dense( - tf.SparseTensor(sparse_indices, sparse_weights, y_shape)) - y_sparse_values = y_sparse.eval(feed_dict={x: x_values}) - tf.compat.v1.logging.info("y_sparse=%s" % (y_sparse_values,)) - sparse_weights_values = sparse_weights.eval(feed_dict={x: x_values}) - sparse_error = tf.compat.v1.test.compute_gradient_error( - x, - x_shape, - sparse_weights, - sparse_weights_values.shape, - x_init_value=x_values) - tf.compat.v1.logging.info("sparse_error = %f" % sparse_error) - self.assertLess(sparse_error, _MAX_ABSOLUTE_NUMERIC_ERROR) - - self.assertTrue( # Checks dense and sparse y's are the same. - np.allclose( - y_dense_values, y_sparse_values, atol=_MAX_ABSOLUTE_NUMERIC_ERROR)) - - def testInBetweenGradients(self): - # Notice we don't test the gradients on top of the keypoints (including - # edges) because the gradient cannot be calculated numerically on those - # points. - # But our op define arbitrary values for them, and they are tested - # in the C++ implementation. Here it suffices to test that the proper op - # gradient c++ implementation is being called. 
- self._testInBetweenGradients([0.0, 1.0]) - self._testInBetweenGradients([0.0, 1.0, 2.0, 3.0, 4.0]) - self._testInBetweenGradients([0.0, 1.0, 10.0, 100.0]) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensorflow_lattice/python/lattice_layer.py b/tensorflow_lattice/python/lattice_layer.py new file mode 100644 index 0000000..1d2239b --- /dev/null +++ b/tensorflow_lattice/python/lattice_layer.py @@ -0,0 +1,851 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Lattice layer with monotonicity, unimodality, trust and bound constraints. + +Keras implementation of the TensorFlow Lattice layer. This layer takes one or +more d-dimensional input(s) and combines them using a lattice function, +satisfying monotonicity, unimodality, trust and bound constraints if specified. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from . import lattice_lib +import six +import tensorflow as tf +from tensorflow import keras + +LATTICE_KERNEL_NAME = "lattice_kernel" +LATTICE_SIZES_NAME = "lattice_sizes" + + +class Lattice(keras.layers.Layer): + # pyformat: disable + """Lattice layer. + + The layer performs interpolation using one of `units` d-dimensional lattices + with an arbitrary number of keypoints per dimension. There are trainable + weights associated with lattice vertices. Input to this layer is considered + to be a d-dimensional point within the lattice. If a point coincides with one + of the lattice vertices, then the interpolation result for this point is equal + to the weight associated with that vertex. Otherwise, all surrounding vertices + contribute to the interpolation result in inverse proportion to the distance + from them. + + For example, lattice sizes [2, 3] produce the following lattice: + + ``` + o---o---o + | | | + o---o---o + ``` + + The first coordinate of the input tensor must be within [0, 1], and the second + within [0, 2]. If coordinates are outside of this range they will be clipped + into it. + + There are several types of constraints on the shape of the learned function + that are either 1 or 2 dimensional: + + * **Monotonicity:** constrains the function to be either increasing or + decreasing in that dimension. + * **Unimodality:** constrains the function to be unimodal in that dimension + with the minimum being at the center lattice vertex of that dimension. A + single dimension cannot be constrained to be both monotonic and unimodal. + Unimodal dimensions must have at least 3 lattice vertices. + * **Edgeworth Trust:** constrains the function to be more responsive to a main + feature as a secondary conditional feature increases or decreases. For + example, we may want the model to rely more on average rating (main + feature) when the number of reviews (conditional feature) is high. In + particular, the constraint guarantees that a given change in the main + feature's value will change the model output by more when a secondary + feature indicates higher trust in the main feature.
Note that the + constraint only works when the model is monotonic in the main feature. + * **Trapezoid Trust:** conceptually similar to edgeworth trust, but this + constraint guarantees that the range of possible outputs along the main + feature dimension, when a conditional feature indicates low trust, is a + *subset* of the range of outputs when a conditional feature indicates high + trust. When lattices have 2 vertices in each constrained dimension, this + implies edgeworth trust (which only constrains the size of the relevant + ranges). With more than 2 lattice vertices per dimension, the two + constraints diverge and are not necessarily 'weaker' or 'stronger' than + each other - edgeworth trust acts throughout the lattice interior on delta + shifts in the main feature, while trapezoid trust only acts on the min and + max extremes of the main feature, constraining the overall range of + outputs across the domain of the main feature. The two types of trust + constraints can be applied jointly. + * **Monotonic Dominance:** constrains the function to require the effect + (slope) in the direction of the *dominant* dimension to be greater than that + of the *weak* dimension for any point in the lattice. Both dominant and weak + dimensions must be monotonic. Note that this constraint might not be + strictly satisfied at the end of training. In such cases, increase the + number of projection iterations. + * **Joint Monotonicity:** constrains the function to be monotonic along a + diagonal direction of a two-dimensional subspace when all other dimensions + are fixed. For example, if our function is scoring the profit given *A* + hotel guests and *B* hotel beds, it may be wrong to constrain the profit to + be increasing in either hotel guests or hotel beds independently, but along + the diagonal (+1 guest and +1 bed), the profit should be monotonic. Note + that this constraint might not be strictly satisfied at the end of + training. In such cases, increase the number of projection iterations. + + There are upper and lower bound constraints on the output. + + All units share the same layer configuration, but each has its own set + of trained parameters. + + Input shape: + - if `units == 1`: tensor of shape: `(batch_size, ..., len(lattice_sizes))` + or list of `len(lattice_sizes)` tensors of same shape: + `(batch_size, ..., 1)` + - if `units > 1`: tensor of shape: + `(batch_size, ..., units, len(lattice_sizes))` or list of + `len(lattice_sizes)` tensors of same shape: `(batch_size, ..., units, 1)` + + A typical shape is: `(batch_size, len(lattice_sizes))` + + Output shape: + Tensor of shape: `(batch_size, ..., units)` + + Attributes: + - All `__init__` arguments. + kernel: weights of the lattice. + + Example: + + ```python + lattice = tfl.lattice_layer.Lattice( + # Number of vertices along each dimension. + lattice_sizes=[2, 2, 3, 4, 2, 2, 3], + # You can specify monotonicity constraints. + monotonicities=['increasing', 'none', 'increasing', 'increasing', + 'increasing', 'increasing', 'increasing'], + # You can specify trust constraints between pairs of features. Here we + # constrain the function to be more responsive to a main feature (index 4) + # as a secondary conditional feature (index 3) increases (positive + # direction). + edgeworth_trusts=(4, 3, 'positive'), + # Output can be bounded.
+ output_min=0.0, + output_max=1.0) + ``` + """ + # pyformat: enable + + def __init__(self, + lattice_sizes, + units=1, + monotonicities=None, + unimodalities=None, + edgeworth_trusts=None, + trapezoid_trusts=None, + monotonic_dominances=None, + joint_monotonicities=None, + output_min=None, + output_max=None, + num_projection_iterations=10, + monotonic_at_every_step=True, + clip_inputs=True, + kernel_initializer="linear_initializer", + kernel_regularizer=None, + **kwargs): + # pyformat: disable + """Initializes an instance of `Lattice`. + + Args: + lattice_sizes: List or tuple of length d of integers which represents the + number of lattice vertices per dimension (minimum is 2). The second + dimension of the input shape must match the number of elements in + lattice_sizes. + units: Output dimension of the layer. See class comments for details. + monotonicities: None or list or tuple of same length as lattice_sizes of + {'none', 'increasing', 0, 1} which specifies if the model output should + be monotonic in the corresponding feature, using 'increasing' or 1 to + indicate increasing monotonicity and 'none' or 0 to indicate no + monotonicity constraints. + unimodalities: None or list or tuple of same length as lattice_sizes of + {'none', 'valley', 0, 1} which specifies if the model output should + be unimodal in the corresponding feature, using 'valley' or 1 to indicate + that the function first decreases, then increases and 'none' or 0 to + indicate no unimodality constraints. + edgeworth_trusts: None or three-element tuple or iterable of three-element + tuples. First element is the index of the main (monotonic) feature. + Second element is the index of the conditional feature. Third element is + the direction of trust: 'positive' or 1 if higher values of the + conditional feature should increase trust in the main feature and + 'negative' or -1 otherwise. + trapezoid_trusts: None or three-element tuple or iterable of three-element + tuples. First element is the index of the main (monotonic) feature. + Second element is the index of the conditional feature. Third element is + the direction of trust: 'positive' or 1 if higher values of the + conditional feature should increase trust in the main feature and + 'negative' or -1 otherwise. + monotonic_dominances: None or two-element tuple or iterable of two-element + tuples. First element is the index of the dominant feature. Second + element is the index of the weak feature. + joint_monotonicities: None or two-element tuple or iterable of two-element + tuples which represents indices of two features requiring joint + monotonicity. + output_min: None or lower bound of the output. + output_max: None or upper bound of the output. + num_projection_iterations: Number of iterations of the Dykstra projection + algorithm. Projection updates will be closer to a true projection (with + respect to the L2 norm) with a higher number of iterations. Increasing + this number has diminishing returns on projection precision. An infinite + number of iterations would yield a perfect projection. Increasing this + number might slightly improve convergence at the cost of slightly + increasing running time. Most likely you want this number to be + proportional to the number of lattice vertices in the largest constrained + dimension. + monotonic_at_every_step: Whether to strictly enforce monotonicity and + trust constraints after every gradient update by applying a final + imprecise projection. Setting this parameter to True together with a + small num_projection_iterations parameter is likely to hurt convergence.
+ clip_inputs: If inputs should be clipped to the input range of the + lattice. + kernel_initializer: None or one of: + - `'linear_initializer'`: initialize parameters to form a linear + function with positive and equal coefficients for monotonic dimensions + and 0.0 coefficients for other dimensions. The linear function is such + that the minimum possible output is equal to output_min and the maximum + possible output is equal to output_max. See LinearInitializer class + docstring for more details. + - Any Keras initializer object. + kernel_regularizer: None or a single element or a list of any of the + following: + - Tuple `('torsion', l1, l2)` where l1 and l2 represent corresponding + regularization amount for graph Torsion regularizer. l1 and l2 can + either be single floats or lists of floats to specify different + regularization amount for every dimension. + - Tuple `('laplacian', l1, l2)` where l1 and l2 represent corresponding + regularization amount for graph Laplacian regularizer. l1 and l2 can + either be single floats or lists of floats to specify different + regularization amount for every dimension. + - Any Keras regularizer object. + **kwargs: Other args passed to `tf.keras.layers.Layer` initializer. + + Raises: + ValueError: If layer hyperparameters are invalid. + """ + # pyformat: enable + lattice_lib.verify_hyperparameters( + lattice_sizes=lattice_sizes, + monotonicities=monotonicities, + unimodalities=unimodalities) + super(Lattice, self).__init__(**kwargs) + + self.lattice_sizes = lattice_sizes + self.units = units + self.monotonicities = monotonicities + self.unimodalities = unimodalities + # Check if inputs are a single tuple of ints (vs an iterable of tuples) + if (isinstance(edgeworth_trusts, tuple) and + isinstance(edgeworth_trusts[0], int)): + self.edgeworth_trusts = [edgeworth_trusts] + else: + self.edgeworth_trusts = edgeworth_trusts + if (isinstance(trapezoid_trusts, tuple) and + isinstance(trapezoid_trusts[0], int)): + self.trapezoid_trusts = [trapezoid_trusts] + else: + self.trapezoid_trusts = trapezoid_trusts + if (isinstance(monotonic_dominances, tuple) and + isinstance(monotonic_dominances[0], int)): + self.monotonic_dominances = [monotonic_dominances] + else: + self.monotonic_dominances = monotonic_dominances + if (isinstance(joint_monotonicities, tuple) and + isinstance(joint_monotonicities[0], int)): + self.joint_monotonicities = [joint_monotonicities] + else: + self.joint_monotonicities = joint_monotonicities + self.output_min = output_min + self.output_max = output_max + self.num_projection_iterations = num_projection_iterations + self.monotonic_at_every_step = monotonic_at_every_step + self.clip_inputs = clip_inputs + + if kernel_initializer in ["linear_initializer", "LinearInitializer"]: + # Come up with reasonable default initialization parameters if they were + # not defined explicitly. + if output_min is not None: + output_init_min = output_min + elif output_max is not None: + output_init_min = min(0.0, output_max) + else: + output_init_min = 0.0 + if output_max is not None: + output_init_max = output_max + elif output_min is not None: + output_init_max = max(1.0, output_min) + else: + output_init_max = 1.0 + + self.kernel_initializer = LinearInitializer( + lattice_sizes=lattice_sizes, + monotonicities=monotonicities, + output_min=output_init_min, + output_max=output_init_max, + unimodalities=unimodalities) + else: + # This is needed for Keras deserialization logic to be aware of our custom + # objects.
+ with keras.utils.custom_object_scope({ + "LinearInitializer": LinearInitializer, + }): + self.kernel_initializer = keras.initializers.get(kernel_initializer) + + self.kernel_regularizer = [] + if kernel_regularizer: + if (callable(kernel_regularizer) or + (isinstance(kernel_regularizer, tuple) and + isinstance(kernel_regularizer[0], six.string_types))): + kernel_regularizer = [kernel_regularizer] + + for regularizer in kernel_regularizer: + if isinstance(regularizer, tuple): + (name, l1, l2) = regularizer + if name.lower() == "torsion": + self.kernel_regularizer.append( + TorsionRegularizer( + lattice_sizes=self.lattice_sizes, l1=l1, l2=l2)) + elif name.lower() == "laplacian": + self.kernel_regularizer.append( + LaplacianRegularizer( + lattice_sizes=self.lattice_sizes, l1=l1, l2=l2)) + else: + raise ValueError("Unknown custom lattice regularizer: %s" % + regularizer) + else: + # This is needed for Keras deserialization logic to be aware of our + # custom objects. + with keras.utils.custom_object_scope({ + "TorsionRegularizer": TorsionRegularizer, + "LaplacianRegularizer": LaplacianRegularizer, + }): + self.kernel_regularizer.append(keras.regularizers.get(regularizer)) + + def build(self, input_shape): + """Standard Keras build() method.""" + lattice_lib.verify_hyperparameters( + lattice_sizes=self.lattice_sizes, + units=self.units, + input_shape=input_shape) + constraints = LatticeConstraints( + lattice_sizes=self.lattice_sizes, + monotonicities=self.monotonicities, + unimodalities=self.unimodalities, + edgeworth_trusts=self.edgeworth_trusts, + trapezoid_trusts=self.trapezoid_trusts, + monotonic_dominances=self.monotonic_dominances, + joint_monotonicities=self.joint_monotonicities, + output_min=self.output_min, + output_max=self.output_max, + num_projection_iterations=self.num_projection_iterations, + enforce_strict_monotonicity=self.monotonic_at_every_step) + + if not self.kernel_regularizer: + kernel_reg = None + elif len(self.kernel_regularizer) == 1: + kernel_reg = self.kernel_regularizer[0] + else: + # Keras interface assumes only one regularizer, so sum all regularization + # losses which we have. + kernel_reg = lambda x: tf.add_n([r(x) for r in self.kernel_regularizer]) + + num_weights = 1 + for dim_size in self.lattice_sizes: + num_weights *= dim_size + self.kernel = self.add_weight( + LATTICE_KERNEL_NAME, + shape=[num_weights, self.units], + initializer=self.kernel_initializer, + regularizer=kernel_reg, + constraint=constraints, + dtype=self.dtype) + + if self.kernel_regularizer and not tf.executing_eagerly(): + # Keras has its own mechanism to handle regularization losses which does + # not use GraphKeys, but we want to also add losses to graph keys so they + # are easily accessible when the layer is used outside of Keras. Adding + # losses to GraphKeys will not interfere with Keras. + for reg in self.kernel_regularizer: + tf.compat.v1.add_to_collection( + tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES, reg(self.kernel)) + + # Constraints with enforce_strict_monotonicity always set to True. Intended + # to be run at the end of training or any time when you need everything to + # be strictly projected.
+ self._final_constraints = LatticeConstraints( + lattice_sizes=self.lattice_sizes, + monotonicities=self.monotonicities, + unimodalities=self.unimodalities, + edgeworth_trusts=self.edgeworth_trusts, + trapezoid_trusts=self.trapezoid_trusts, + monotonic_dominances=self.monotonic_dominances, + joint_monotonicities=self.joint_monotonicities, + output_min=self.output_min, + output_max=self.output_max, + num_projection_iterations=20, + enforce_strict_monotonicity=True) + + self.lattice_sizes_tensor = tf.constant( + self.lattice_sizes, dtype=tf.int32, name=LATTICE_SIZES_NAME) + super(Lattice, self).build(input_shape) + + def call(self, inputs): + """Standard Keras call() method.""" + interpolation_weights = lattice_lib.compute_interpolation_weights( + inputs=inputs, + lattice_sizes=self.lattice_sizes, + clip_inputs=self.clip_inputs) + + # Use control dependencies to save lattice sizes as a graph constant so + # that the visualization toolbox can recover it from the saved graph. + # Wrap this constant into a pure op since in TF 2.0 there are issues passing + # tensors into control_dependencies. + with tf.control_dependencies([tf.identity(self.lattice_sizes_tensor)]): + if self.units == 1: + # Weights shape: (batch-size, ..., prod(lattice_sizes)) + # Kernel shape: (prod(lattice_sizes), 1) + return tf.matmul(interpolation_weights, self.kernel) + else: + # Weights shape: (batch-size, ..., units, prod(lattice_sizes)) + # Kernel shape: (prod(lattice_sizes), units) + return tf.reduce_sum( + interpolation_weights * tf.transpose(self.kernel), axis=-1) + + def compute_output_shape(self, input_shape): + """Standard Keras compute_output_shape() method.""" + if isinstance(input_shape, list): + input_shape = input_shape[0] + if self.units == 1: + return tuple(input_shape[:-1]) + (1,) + else: + # Second to last dimension must be equal to 'units'. Nothing to append. + return input_shape[:-1] + + def get_config(self): + """Standard Keras config for serialization.""" + config = { + "lattice_sizes": self.lattice_sizes, + "units": self.units, + "monotonicities": self.monotonicities, + "unimodalities": self.unimodalities, + "edgeworth_trusts": self.edgeworth_trusts, + "trapezoid_trusts": self.trapezoid_trusts, + "monotonic_dominances": self.monotonic_dominances, + "joint_monotonicities": self.joint_monotonicities, + "output_min": self.output_min, + "output_max": self.output_max, + "num_projection_iterations": self.num_projection_iterations, + "monotonic_at_every_step": self.monotonic_at_every_step, + "clip_inputs": self.clip_inputs, + "kernel_initializer": + keras.initializers.serialize(self.kernel_initializer), + "kernel_regularizer": + [keras.regularizers.serialize(r) for r in self.kernel_regularizer], + } # pyformat: disable + config.update(super(Lattice, self).get_config()) + return config + + def finalize_constraints(self): + """Ensures the layer's weights strictly satisfy constraints. + + Applies approximate projection to strictly satisfy specified constraints. + If `monotonic_at_every_step == True` there is no need to call this function. + + Returns: + In eager mode directly updates weights and returns the variable which + stores them. In graph mode returns an `assign_add` op which has to be + executed to update the weights. + """ + return self.kernel.assign_add( + self._final_constraints(self.kernel) - self.kernel) + + def assert_constraints(self, eps=1e-6): + """Asserts that weights satisfy all constraints. + + In graph mode builds and returns a list of assertion ops. + In eager mode directly executes assertions.
+ + Args: + eps: allowed constraint violation. + + Returns: + List of assertion ops in graph mode, or immediately asserts in eager + mode. + """ + return lattice_lib.assert_constraints( + weights=self.kernel, + lattice_sizes=self.lattice_sizes, + monotonicities=lattice_lib.canonicalize_monotonicities( + self.monotonicities), + edgeworth_trusts=lattice_lib.canonicalize_trust(self.edgeworth_trusts), + trapezoid_trusts=lattice_lib.canonicalize_trust(self.trapezoid_trusts), + monotonic_dominances=self.monotonic_dominances, + joint_monotonicities=self.joint_monotonicities, + output_min=self.output_min, + output_max=self.output_max, + eps=eps) + + +class LinearInitializer(keras.initializers.Initializer): + # pyformat: disable + """Initializes a `tfl.lattice_layer.Lattice` as a linear function. + + - The linear function will have positive coefficients for monotonic dimensions + and 0 otherwise. If all dimensions are unconstrained, all coefficients will + be positive. + - Linear coefficients are set such that the minimum/maximum output of the + lattice matches the given output_min/output_max. + - Each monotonic dimension contributes with the same weight regardless of the + number of vertices per dimension. + - No dimension can be both monotonic and unimodal. + - Unimodal dimensions contribute with the same weight as monotonic dimensions. + - Unimodal dimensions linearly decrease for the first `(dim_size + 1) // 2` + vertices and then linearly increase for the following vertices. + + Attributes: + - All `__init__` arguments. + """ + # pyformat: enable + + def __init__(self, + lattice_sizes, + monotonicities, + output_min, + output_max, + unimodalities=None): + """Initializes an instance of `LinearInitializer`. + + Args: + lattice_sizes: Lattice sizes of `tfl.lattice_layer.Lattice` to initialize. + monotonicities: Monotonic dimensions for initialization. Does not need to + match `monotonicities` of `tfl.lattice_layer.Lattice`. + output_min: Minimum layer output after initialization. + output_max: Maximum layer output after initialization. + unimodalities: None or unimodal dimensions after initialization. Does not + need to match `unimodalities` of `tfl.lattice_layer.Lattice`. + + Raises: + ValueError: If there is a mismatch between `monotonicities` and + `lattice_sizes`. + """ + lattice_lib.verify_hyperparameters( + lattice_sizes=lattice_sizes, + monotonicities=monotonicities, + unimodalities=unimodalities, + output_min=output_min, + output_max=output_max) + + self.lattice_sizes = lattice_sizes + self.monotonicities = monotonicities + self.output_min = output_min + self.output_max = output_max + self.unimodalities = unimodalities + + def __call__(self, shape, dtype=None, partition_info=None): + """Returns weights of `tfl.lattice_layer.Lattice` layer. + + Args: + shape: Must be: `(prod(lattice_sizes), units)`. + dtype: Standard Keras initializer param. + partition_info: Standard Keras initializer param. Not used. + """ + # TODO: figure out whether it should be used.
+ del partition_info + return lattice_lib.linear_initializer( + lattice_sizes=self.lattice_sizes, + monotonicities=lattice_lib.canonicalize_monotonicities( + self.monotonicities), + unimodalities=lattice_lib.canonicalize_unimodalities( + self.unimodalities), + output_min=self.output_min, + output_max=self.output_max, + units=shape[1], + dtype=dtype) + + def get_config(self): + """Standard Keras config for serialization.""" + config = { + "lattice_sizes": self.lattice_sizes, + "monotonicities": self.monotonicities, + "output_min": self.output_min, + "output_max": self.output_max, + "unimodalities": self.unimodalities, + } # pyformat: disable + return config + + +class LatticeConstraints(keras.constraints.Constraint): + # pyformat: disable + """Constraints for `tfl.lattice_layer.Lattice` layer. + + Applies monotonicity, unimodality, trust and bound constraints to the lattice + parameters. See `tfl.lattice_layer.Lattice` for details. + + Attributes: + - All `__init__` arguments. + """ + # pyformat: enable + + def __init__(self, + lattice_sizes, + monotonicities=None, + unimodalities=None, + edgeworth_trusts=None, + trapezoid_trusts=None, + monotonic_dominances=None, + joint_monotonicities=None, + output_min=None, + output_max=None, + num_projection_iterations=1, + enforce_strict_monotonicity=True): + """Initializes an instance of `LatticeConstraints`. + + Args: + lattice_sizes: Lattice sizes of the `Lattice` layer to constrain. + monotonicities: Same meaning as corresponding parameter of `Lattice`. + unimodalities: Same meaning as corresponding parameter of `Lattice`. + edgeworth_trusts: Same meaning as corresponding parameter of `Lattice`. + trapezoid_trusts: Same meaning as corresponding parameter of `Lattice`. + monotonic_dominances: Same meaning as corresponding parameter of + `Lattice`. + joint_monotonicities: Same meaning as corresponding parameter of + `Lattice`. + output_min: Minimum possible output. + output_max: Maximum possible output. + num_projection_iterations: Same meaning as corresponding parameter of + `Lattice`. + enforce_strict_monotonicity: Whether to use approximate projection to + ensure that constraints are strictly satisfied. + + Raises: + ValueError: If weights to project don't correspond to `lattice_sizes`.
+ """ + lattice_lib.verify_hyperparameters( + lattice_sizes=lattice_sizes, + monotonicities=monotonicities, + unimodalities=unimodalities, + edgeworth_trusts=edgeworth_trusts, + trapezoid_trusts=trapezoid_trusts, + monotonic_dominances=monotonic_dominances, + joint_monotonicities=joint_monotonicities) + + self.lattice_sizes = lattice_sizes + self.monotonicities = monotonicities + self.unimodalities = unimodalities + self.edgeworth_trusts = edgeworth_trusts + self.trapezoid_trusts = trapezoid_trusts + self.monotonic_dominances = monotonic_dominances + self.joint_monotonicities = joint_monotonicities + self.output_min = output_min + self.output_max = output_max + self.num_projection_iterations = num_projection_iterations + self.enforce_strict_monotonicity = enforce_strict_monotonicity + + def __call__(self, w): + """Applies constraints to `w`.""" + canonical_monotonicities = lattice_lib.canonicalize_monotonicities( + self.monotonicities) + canonical_unimodalities = lattice_lib.canonicalize_unimodalities( + self.unimodalities) + canonical_edgeworth_trusts = lattice_lib.canonicalize_trust( + self.edgeworth_trusts) + canonical_trapezoid_trusts = lattice_lib.canonicalize_trust( + self.trapezoid_trusts) + num_constraint_dims = lattice_lib.count_non_zeros( + canonical_monotonicities, canonical_unimodalities) + # No need to separately check for trust constraints and monotonic dominance, + # since monotonicity is required to impose them. The only exception is joint + # monotonicity. + if (num_constraint_dims > 0 or self.joint_monotonicities): + w = lattice_lib.project_by_dykstra( + w, + lattice_sizes=self.lattice_sizes, + monotonicities=canonical_monotonicities, + unimodalities=canonical_unimodalities, + edgeworth_trusts=canonical_edgeworth_trusts, + trapezoid_trusts=canonical_trapezoid_trusts, + monotonic_dominances=self.monotonic_dominances, + joint_monotonicities=self.joint_monotonicities, + num_iterations=self.num_projection_iterations) + if self.enforce_strict_monotonicity: + w = lattice_lib.finalize_constraints( + w, + lattice_sizes=self.lattice_sizes, + monotonicities=canonical_monotonicities, + edgeworth_trusts=canonical_edgeworth_trusts, + trapezoid_trusts=canonical_trapezoid_trusts, + output_min=self.output_min, + output_max=self.output_max) + # TODO: come up with a better solution than separately applying + # bounds again after other projections. + if self.output_min is not None: + w = tf.maximum(w, self.output_min) + if self.output_max is not None: + w = tf.minimum(w, self.output_max) + return w + + def get_config(self): + """Standard Keras config for serialization.""" + return { + "lattice_sizes": self.lattice_sizes, + "monotonicities": self.monotonicities, + "unimodalities": self.unimodalities, + "edgeworth_trusts": self.edgeworth_trusts, + "trapezoid_trusts": self.trapezoid_trusts, + "monotonic_dominances": self.monotonic_dominances, + "joint_monotonicities": self.joint_monotonicities, + "output_min": self.output_min, + "output_max": self.output_max, + "num_projection_iterations": self.num_projection_iterations, + "enforce_strict_monotonicity": self.enforce_strict_monotonicity + } # pyformat: disable + + +class TorsionRegularizer(keras.regularizers.Regularizer): + # pyformat: disable + """Torsion regularizer for `tfl.lattice_layer.Lattice` layer. + + Lattice torsion regularizer penalizes how much the lattice function twists + from side-to-side (see + [publication](http://jmlr.org/papers/v17/15-243.html)). 
+ + Consider a 3 x 2 lattice with weights `w`: + + ``` + w[3]-----w[4]-----w[5] + | | | + | | | + w[0]-----w[1]-----w[2] + ``` + + In this case, the torsion regularizer is defined as: + + ``` + l1 * (|w[4] + w[0] - w[3] - w[1]| + |w[5] + w[1] - w[4] - w[2]|) + + l2 * ((w[4] + w[0] - w[3] - w[1])^2 + (w[5] + w[1] - w[4] - w[2])^2) + ``` + + Attributes: + - All `__init__` arguments. + """ + # pyformat: enable + + def __init__(self, lattice_sizes, l1=0.0, l2=0.0): + """Initializes an instance of `TorsionRegularizer`. + + Args: + lattice_sizes: Lattice sizes of `tfl.lattice_layer.Lattice` to regularize. + l1: l1 regularization amount. Either single float or list or tuple of + floats to specify different regularization amount per dimension. The + amount of regularization for the interaction term between two dimensions + is the product of the corresponding per dimension amounts. + l2: l2 regularization amount. Either single float or list or tuple of + floats to specify different regularization amount per dimension. The + amount of regularization for the interaction term between two dimensions + is the product of the corresponding per dimension amounts. + """ + self.lattice_sizes = lattice_sizes + self.l1 = l1 + self.l2 = l2 + + def __call__(self, x): + """Returns regularization loss for `x`.""" + lattice_lib.verify_hyperparameters( + lattice_sizes=self.lattice_sizes, weights_shape=x.shape) + return lattice_lib.torsion_regularizer(x, self.lattice_sizes, self.l1, + self.l2) + + def get_config(self): + """Standard Keras config for serialization.""" + return { + "lattice_sizes": self.lattice_sizes, + "l1": self.l1, + "l2": self.l2, + } # pyformat: disable + + +class LaplacianRegularizer(keras.regularizers.Regularizer): + # pyformat: disable + """Laplacian regularizer for `tfl.lattice_layer.Lattice` layer. + + Laplacian regularizer penalizes the difference between adjacent vertices in + multi-cell lattice (see + [publication](http://jmlr.org/papers/v17/15-243.html)). + + Consider a 3 x 2 lattice with weights `w`: + + ``` + w[3]-----w[4]-----w[5] + | | | + | | | + w[0]-----w[1]-----w[2] + ``` + + where the number at each node represents the weight index. + In this case, the laplacian regularizer is defined as: + + ``` + l1[0] * (|w[1] - w[0]| + |w[2] - w[1]| + + |w[4] - w[3]| + |w[5] - w[4]|) + + l1[1] * (|w[3] - w[0]| + |w[4] - w[1]| + |w[5] - w[2]|) + + + l2[0] * ((w[1] - w[0])^2 + (w[2] - w[1])^2 + + (w[4] - w[3])^2 + (w[5] - w[4])^2) + + l2[1] * ((w[3] - w[0])^2 + (w[4] - w[1])^2 + (w[5] - w[2])^2) + ``` + + Attributes: + - All `__init__` arguments. + """ + # pyformat: enable + + def __init__(self, lattice_sizes, l1=0.0, l2=0.0): + """Initializes an instance of `LaplacianRegularizer`. + + Args: + lattice_sizes: Lattice sizes of `tfl.lattice_layer.Lattice` to regularize. + l1: l1 regularization amount. Either single float or list or tuple of + floats to specify different regularization amount per dimension. + l2: l2 regularization amount. Either single float or list or tuple of + floats to specify different regularization amount per dimension. + + Raises: + ValueError: If provided input does not correspond to `lattice_sizes`. 
+ """ + lattice_lib.verify_hyperparameters( + lattice_sizes=lattice_sizes, + regularization_amount=l1, + regularization_info="l1") + lattice_lib.verify_hyperparameters( + lattice_sizes=lattice_sizes, + regularization_amount=l2, + regularization_info="l2") + self.lattice_sizes = lattice_sizes + self.l1 = l1 + self.l2 = l2 + + def __call__(self, x): + """Returns regularization loss for `x`.""" + lattice_lib.verify_hyperparameters( + lattice_sizes=self.lattice_sizes, weights_shape=x.shape) + return lattice_lib.laplacian_regularizer(x, self.lattice_sizes, self.l1, + self.l2) + + def get_config(self): + """Standard Keras config for serialization.""" + return { + "lattice_sizes": self.lattice_sizes, + "l1": self.l1, + "l2": self.l2 + } # pyformat: disable diff --git a/tensorflow_lattice/python/lattice_lib.py b/tensorflow_lattice/python/lattice_lib.py new file mode 100644 index 0000000..ad24cd6 --- /dev/null +++ b/tensorflow_lattice/python/lattice_lib.py @@ -0,0 +1,2142 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Implementation of algorithms required for Lattice layer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import copy +import itertools +import math +from absl import logging +import six + +import tensorflow as tf + + +def compute_interpolation_weights(inputs, + lattice_sizes, + clip_inputs=True): + """Computes weights for lattice interpolation. + + Running time: `O(batch_size * prod(lattice_sizes))` + + If `clip_inputs == True`, inputs outside of the range defined by + `lattice_sizes` will be clipped into the lattice input range. If not, the + corresponding weights will linearly approach 0.0 with input moving away from + the valid input range. + + Args: + inputs: Tensor of shape: `(batch_size, ..., len(lattice_sizes))` or list of + `len(lattice_sizes)` tensors of same shape `(batch_size, ..., 1)` which + represents points to apply lattice interpolation to. A typical shape is + `(batch_size, len(lattice_sizes))`. + lattice_sizes: List or tuple of integers which represents lattice sizes of + layer for which interpolation is being computed. + clip_inputs: Whether inputs should be clipped to the input range of the + lattice. + + Raises: + ValueError: If last dimension of `inputs` does not match `lattice_sizes`. + + Returns: + Interpolation weights tensor of shape: + `(batch_size, ..., prod(lattice_sizes))`. + """ + if isinstance(inputs, list): + input_shape = [tensor.shape for tensor in inputs] + input_dtype = inputs[0].dtype + else: + input_shape = inputs.shape + input_dtype = inputs.dtype + verify_hyperparameters(lattice_sizes=lattice_sizes, input_shape=input_shape) + + if clip_inputs: + inputs = _clip_onto_lattice_range(inputs=inputs, + lattice_sizes=lattice_sizes) + + # Create interpolation keypoints in advance in order to reuse them for all + # dimensions of same size. 
+  dim_keypoints = {}
+  for dim_size in set(lattice_sizes):
+    dim_keypoints[dim_size] = tf.constant([i for i in range(dim_size)],
+                                          dtype=input_dtype)
+
+  # Bucketize in order to share interpolation ops across consecutive dims of
+  # same size.
+  bucketized_inputs = _bucketize_consequtive_equal_dims(
+      inputs=inputs, lattice_sizes=lattice_sizes)
+
+  one_d_interpolation_weights = []
+  for tensor, bucket_size, dim_size in bucketized_inputs:
+    if bucket_size > 1:
+      # Within a bucket all dims have same lattice sizes so instead of
+      # splitting before interpolation we split after interpolation.
+      # Expand dims in order to make interpolation through broadcasting work.
+      tensor = tf.expand_dims(tensor, axis=-1)
+
+    # Broadcasting subtraction op.
+    distance = tf.abs(tensor - dim_keypoints[dim_size])
+    # The following ops do the following:
+    # 1) if distance >= 1.0 then set interpolation weight to 0.0.
+    # 2) if distance < 1.0 then set interpolation weight to 1.0 - distance.
+    weights = 1.0 - tf.minimum(distance, 1.0)
+
+    if bucket_size == 1:
+      one_d_interpolation_weights.append(weights)
+    else:
+      one_d_interpolation_weights.extend(tf.unstack(weights, axis=-2))
+
+  return batch_outer_operation(one_d_interpolation_weights,
+                               operation=tf.multiply)
+
+
+def batch_outer_operation(list_of_tensors, operation=tf.multiply):
+  """Computes the outer operation of the last dimensions of given tensors.
+
+  Args:
+    list_of_tensors: List of tensors of same shape `(batch_size, ..., k[i])`
+      where everything except `k[i]` matches.
+    operation: Binary TF operation which supports broadcasting to be applied.
+
+  Returns:
+    Tensor of shape: `(batch_size, ..., mul_i(k[i]))`.
+  """
+  if len(list_of_tensors) == 1:
+    return list_of_tensors[0]
+
+  # Dimensions of size '1' at position -1 of first tensor and -2 of second
+  # tensor will result in outer operation due to broadcasting.
+  result = tf.expand_dims(list_of_tensors[0], axis=-1)
+
+  for i, tensor in enumerate(list_of_tensors[1:]):
+    result = operation(result, tf.expand_dims(tensor, axis=-2))
+
+    # For TF1 compatibility convert shape to integers allowing first dimension
+    # to be undefined.
+    #
+    # If we want to support an arbitrary number of undefined dimensions we
+    # must compute new_shape using tf ops. It is undesirable because we want
+    # to minimize graph size.
+    shape = [-1] + [int(size) for size in result.shape[1:]]
+
+    # Merge last 2 dimensions which we just multiplied.
+    new_shape = shape[:-2] + [shape[-2] * shape[-1]]
+
+    # Since we are doing reshape anyway append 1 to prepare 'result' for
+    # following outer operation.
+    if i < len(list_of_tensors) - 2:
+      new_shape.append(1)
+
+    result = tf.reshape(result, shape=new_shape)
+  return result
+
+
+def _clip_onto_lattice_range(inputs, lattice_sizes):
+  """Clips inputs onto valid input range for given lattice_sizes.
+
+  Args:
+    inputs: `inputs` argument of `compute_interpolation_weights`.
+    lattice_sizes: list or tuple of integers which represents lattice sizes to
+      clip onto.
+
+  Returns:
+    Clipped `inputs`.
+  """
+  if not isinstance(inputs, list):
+    upper_bounds = [dim_size - 1.0 for dim_size in lattice_sizes]
+    return tf.clip_by_value(
+        inputs,
+        clip_value_min=tf.zeros(shape=len(lattice_sizes), dtype=inputs.dtype),
+        clip_value_max=tf.constant(upper_bounds,
+                                   dtype=inputs.dtype))
+  else:
+    # Share bound constant across dimensions of same size.
+    dim_upper_bounds = {}
+    for dim_size in set(lattice_sizes):
+      dim_upper_bounds[dim_size] = tf.constant(dim_size - 1.0,
+                                               dtype=inputs[0].dtype)
+    dim_lower_bound = tf.zeros(shape=[], dtype=inputs[0].dtype)
+
+    clipped_inputs = []
+    for one_d_input, dim_size in zip(inputs, lattice_sizes):
+      clipped_inputs.append(
+          tf.clip_by_value(one_d_input,
+                           clip_value_min=dim_lower_bound,
+                           clip_value_max=dim_upper_bounds[dim_size]))
+    return clipped_inputs
+
+
+def _bucketize_consequtive_equal_dims(inputs, lattice_sizes):
+  """Groups consecutive dimensions of same size together.
+
+  For example `lattice_sizes == [2, 2, 2, 5, 5, 2]` produces 3 buckets:
+  - bucket of size 3 which corresponds to first group of dimensions of size 2.
+  - bucket of size 2 which corresponds to group of dimensions of size 5.
+  - bucket of size 1 which corresponds to last dimension of size 2.
+  If `inputs` is a single tensor then it will be split according to buckets.
+
+  If `inputs` is a list of tensors then all buckets will be of size 1
+  regardless of lattice sizes in order to avoid merging tensors. In this case
+  the function acts merely as a convenience helper to unify output format.
+
+  Args:
+    inputs: `inputs` argument of `compute_interpolation_weights`.
+    lattice_sizes: list or tuple of integers which represents lattice sizes.
+
+  Returns:
+    Iterable of tuples: `(tensor, bucket_size, bucket_dim_size)` where
+    `tensor.shape[-1] == bucket_size` and `bucket_dim_size` is a lattice size
+    which corresponds to bucket.
+  """
+  if not isinstance(inputs, list):
+    bucket_sizes = []
+    bucket_dim_sizes = []
+    current_size = 1
+    for i in range(1, len(lattice_sizes)):
+      if lattice_sizes[i] != lattice_sizes[i-1]:
+        bucket_sizes.append(current_size)
+        bucket_dim_sizes.append(lattice_sizes[i-1])
+        current_size = 1
+      else:
+        current_size += 1
+    bucket_sizes.append(current_size)
+    bucket_dim_sizes.append(lattice_sizes[-1])
+    inputs = tf.split(inputs, num_or_size_splits=bucket_sizes, axis=-1)
+  else:
+    # TODO: run benchmark and figure out whether it makes sense to merge
+    # individual tensors here.
+    bucket_sizes = [1] * len(lattice_sizes)
+    bucket_dim_sizes = lattice_sizes
+  return zip(inputs, bucket_sizes, bucket_dim_sizes)
+
+
+def linear_initializer(lattice_sizes,
+                       output_min,
+                       output_max,
+                       monotonicities=None,
+                       unimodalities=None,
+                       units=1,
+                       dtype=tf.float32):
+  """Returns a lattice layer weight tensor that represents a linear function.
+
+  - The linear function will have positive coefficients for monotonic
+    dimensions and 0 otherwise. If all dimensions are unconstrained, all
+    coefficients will be positive.
+  - Linear coefficients are set such that the minimum/maximum output of the
+    lattice matches the given output_min/output_max.
+  - Each monotonic dimension contributes with same weight regardless of number
+    of vertices per dimension.
+  - No dimension can be both monotonic and unimodal.
+  - Unimodal dimensions contribute with same weight as monotonic dimensions.
+  - Unimodal dimensions linearly decrease for first `(dim_size + 1) // 2`
+    vertices and then linearly increase for following vertices.
+
+  Args:
+    lattice_sizes: List or tuple of integers which represents lattice sizes.
+    output_min: Minimum output of lattice layer after initialization.
+    output_max: Maximum output of lattice layer after initialization.
+    monotonicities: None or list or tuple of same length as lattice_sizes of
+      {0, 1} which represents monotonicity constraints per dimension.
+      1 stands for increasing (non-decreasing in fact), 0 for no monotonicity
+      constraints.
+    unimodalities: None or list or tuple of same length as lattice_sizes of
+      {0, 1} which represents unimodality constraints per dimension. 1 stands
+      for unimodal dimension, 0 for no unimodality constraints.
+    units: Output dimension of the layer. Each of `units` lattices will be
+      initialized identically.
+    dtype: dtype.
+
+  Returns:
+    Lattice weights tensor of shape: `(prod(lattice_sizes), units)`.
+  """
+  verify_hyperparameters(
+      lattice_sizes=lattice_sizes,
+      monotonicities=monotonicities,
+      unimodalities=unimodalities)
+  if monotonicities is None:
+    monotonicities = [0] * len(lattice_sizes)
+  if unimodalities is None:
+    unimodalities = [0] * len(lattice_sizes)
+
+  num_constraint_dims = count_non_zeros(monotonicities, unimodalities)
+  if num_constraint_dims == 0:
+    monotonicities = [1] * len(lattice_sizes)
+    num_constraint_dims = len(lattice_sizes)
+
+  dim_range = float(output_max - output_min) / num_constraint_dims
+  one_d_weights = []
+
+  for monotonicity, unimodality, dim_size in zip(monotonicities,
+                                                 unimodalities, lattice_sizes):
+    if monotonicity != 0:
+      one_d = _linspace(start=0.0, stop=dim_range, num=dim_size)
+    elif unimodality != 0:
+      decreasing = _linspace(start=dim_range, stop=0.0,
+                             num=(dim_size + 1) // 2)
+      increasing = _linspace(start=0.0, stop=dim_range,
+                             num=(dim_size + 1) // 2)
+      # For odd size dimensions we want just 1 lowest point. For even sized we
+      # want 2.
+      one_d = decreasing + increasing[dim_size % 2:]
+    else:
+      one_d = [0.0] * dim_size
+    # Insert batch dim of size 1 at the beginning for batch_outer_operation.
+    one_d_weights.append(tf.constant(one_d, dtype=dtype, shape=[1, dim_size]))
+
+  # Use same implementation of outer operation as interpolation logic in order
+  # to guarantee same weights order.
+  weights = batch_outer_operation(one_d_weights, operation=tf.add)
+  weights = tf.reshape(weights + output_min, shape=[-1, 1])
+  if units > 1:
+    weights = tf.tile(weights, multiples=[1, units])
+  return weights
+
+
+def _linspace(start, stop, num):
+  """Returns `num` uniformly spaced floats between `start` and `stop`."""
+  if num == 1:
+    return [start]
+  return [start + (stop - start) * i / (num - 1.0) for i in range(num)]
+
+
+# TODO: Add final projection for unimodality constraints.
+def _approximately_project_monotonicity(weights, lattice_sizes,
+                                        monotonicities):
+  """Approximately projects to strictly meet monotonicity constraints.
+
+  Algorithm details:
+
+  Definition:
+  A[i] refers to the i-th coordinate of vertex A.
+  For 2 vertices A and B:
+  "A<B" means that: A[i] <= B[i] for all i, and A != B.
+
+  For a lattice to be monotonic it is sufficient that for every vertex V
+  either:
+  weight[V] >= weight[X] for any vertex X that: X<V
+  Or:
+  weight[V] <= weight[X] for any vertex X that: V<X
+
+  For example, consider a 4 x 3 lattice where the number at each node is the
+  weight index and both dimensions increase monotonically up and to the right:
+
+  ```
+  0---1---2---3
+  |   |   |   |
+  4---5---6---7
+  |   |   |   |
+  8---9---10--11
+  ```
+
+  For vertex 6 it is sufficient that either:
+  weight[6] >= max(weight[4, 5, 8, 9, 10])
+  Or:
+  weight[6] <= min(weight[2, 3, 7])
+
+  Given the above definition, we can use either of the following update rules
+  to approximately project into the feasible space:
+  max_proj[V] = max(weight[X]) for any X that: X<V or X == V
+  min_proj[V] = min(weight[X]) for any X that: V<X or X == V
+
+  Both rules amount to cumulative maximum/minimum operations along each
+  monotonic dimension, and each alone already guarantees monotonicity, but
+  each is biased in one direction. To reduce this bias the implementation
+  below averages the max_proj update with the original weights and then
+  applies the min_proj update to the result; the outcome still strictly
+  satisfies the constraints, although it is not an exact projection in the
+  L2 sense.
+
+  Args:
+    weights: Tensor with weights of lattice layer, with shape lattice_sizes.
+    lattice_sizes: List or tuple of integers which represents lattice sizes
+      which correspond to weights.
+    monotonicities: List or tuple of same length as lattice_sizes of {0, 1}
+      which represents monotonicity constraints per dimension. 1 stands for
+      increasing (non-decreasing in fact), 0 for no monotonicity constraints.
+
+  Returns:
+    Tensor with projected weights matching shape of input weights.
+  """
+  max_projection = weights
+  for dim in range(len(lattice_sizes)):
+    if monotonicities[dim] == 0:
+      continue
+    layers = tf.unstack(max_projection, axis=dim)
+    # Cumulative maximum along dim: every layer becomes at least as large as
+    # all layers corresponding to smaller vertices along this dimension.
+    for i in range(1, len(layers)):
+      layers[i] = tf.maximum(layers[i], layers[i - 1])
+    max_projection = tf.stack(layers, axis=dim)
+
+  half_projection = (weights + max_projection) / 2.0
+
+  min_projection = half_projection
+  for dim in range(len(lattice_sizes)):
+    if monotonicities[dim] == 0:
+      continue
+    layers = tf.unstack(min_projection, axis=dim)
+    # Cumulative minimum from the other end. This pass alone guarantees
+    # monotonicity of the result.
+    for i in range(len(layers) - 2, -1, -1):
+      layers[i] = tf.minimum(layers[i], layers[i + 1])
+    min_projection = tf.stack(layers, axis=dim)
+
+  return min_projection
+
+
+def _approximately_project_edgeworth(weights, lattice_sizes, edgeworth_trusts):
+  """Approximately projects to strictly meet all edgeworth trust constraints.
+
+  Note that this function will not introduce violations to any
+  previously-satisfied monotonicity constraints.
+
+  Algorithm details:
+
+  Edgeworth trust constraints require the slope across the main feature to be
+  at least as large on the high-trust side of the conditional feature as on
+  the low-trust side. For each constraint we walk through the grid of the two
+  constrained dimensions and, wherever a pair of slopes is in violation, move
+  the offending corner, together with all weights 'behind' it along the
+  remaining dimensions, by the largest violation found, so that fixing one
+  square does not break squares that were already visited.
+
+  Args:
+    weights: Tensor with weights of lattice layer, with shape lattice_sizes.
+    lattice_sizes: List or tuple of integers which represents lattice sizes
+      which correspond to weights.
+    edgeworth_trusts: Iterable of three-element tuples. First element is the
+      index of the main (monotonic) feature. Second element is the index of
+      the conditional feature. Third element is the direction of trust: 1 if
+      higher values of the conditional feature should increase trust in the
+      main feature and -1 otherwise.
+
+  Returns:
+    Tensor with projected weights matching shape of input weights.
+  """
+  trust_projection = weights
+  for main_dim, cond_dim, cond_direction in edgeworth_trusts:
+    layers = _unstack_2d(trust_projection, main_dim, cond_dim)
+    if cond_direction >
0: + for i in range(0, lattice_sizes[main_dim] - 1): + for j in range(0, lattice_sizes[cond_dim] - 1): + difference_in_slopes = ((layers[i + 1][j] - layers[i][j]) - + (layers[i + 1][j + 1] - layers[i][j + 1])) + # Move all weights by the value of the biggest violation to both + # satisfy this constraint and not hurt others. See function comments + # for more details. + max_violation = tf.maximum(tf.reduce_max(difference_in_slopes), 0) + layers[i + 1][j + 1] += max_violation + else: + for i in range(lattice_sizes[main_dim] - 2, -1, -1): + for j in range(lattice_sizes[cond_dim] - 2, -1, -1): + difference_in_slopes = ((layers[i + 1][j + 1] - layers[i][j + 1]) - + (layers[i + 1][j] - layers[i][j])) + max_violation = tf.maximum(tf.reduce_max(difference_in_slopes), 0) + layers[i][j] -= max_violation + trust_projection = _stack_2d(layers, main_dim, cond_dim) + + return trust_projection + + +# TODO: It is likely that this algorithm will work for all trapezoid +# trust constraints without needing the reduce_max, as long as there are no +# edgeworth constraints. If true, consider using that approach when possible. +def _approximately_project_trapezoid(weights, lattice_sizes, trapezoid_trusts, + edgeworth_trusts): + """Approximately projects to strictly meet all trapezoid trust constraints. + + Note that this function will not introduce violations to any + previously-satisfied monotonicity or edgeworth constraints. + + Algorithm details: + + For a constraint on main dimension i and conditional dimension j, consider + some slice of weights that is fixed along all other dimensions, leaving a grid + + ``` + 0---1---2---3 + | | | | + 4---5---6---7 + | | | | + 8---9---10--11 + ``` + + You can think of all the other dimensions as other such grids stacked behind + this one, e.g. weight[8] and the points behind it are all such points with + index 0 in the i'th and j'th dimensions, and weight[6] and the points behind + it are all such points with index 2 in the i'th dimension and index 1 in the + j'th. + + We project to trapezoid trust on this grid by working up both edges of + the lattice and only ever decreasing weights on the low main_feature side and + increasing weights on the high main_feature side. In the above example, we + would first consider the pair {8, 4} and update weight 4 to be min(8, 4), + before then looking at {4, 0} and updating 0 to be min(4, 0). Similarly set + weight 7 to be max(7, 11) and then weight 3 to max(3, 7). Flip the orders if + cond_direction is -1: work down instead of up. + + Unlike in the edgeworth trust case, we do not necessarily look 'behind' the + page and update all points behind a given grid point by the maximum violation + at each step. It turns out that while this does have the nice property of + maintaining almost all types of edgeworth constraints, for the same reason + that the edgeworth algorithm does (co-movement of weights involved in other + constraints), it can actually break other trapezoid constraints, namely those + which share the same conditional feature. + + There is one exception, which is the matching edgeworth trust constraint. In + this case, the trapezoid updates only touch one corner of each edgeworth + constraint and so can violate them. The solution is to update by the max of + all violations behind the page and all violations encountered below in the + grid. + + If you separately update each grid by the violations in that grid, this update + procedure turns out to respect all trapezoid constraints. 
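As an editorial aside, the 'work up both edges' pass described above can be sketched in a few lines of plain NumPy (index 0 here plays the role of the bottom row of the grid, e.g. weights 8 and 11); the docstring's rationale for why these per-grid updates are safe continues below.

```python
import numpy as np

low_edge = np.array([5.0, 6.0, 4.0, 7.0])   # min-main-feature edge, bottom-up
high_edge = np.array([3.0, 2.0, 4.0, 1.0])  # max-main-feature edge, bottom-up

for j in range(1, len(low_edge)):
    # e.g. 'update weight 4 to be min(8, 4)': only ever decrease this edge.
    low_edge[j] = min(low_edge[j], low_edge[j - 1])
    # e.g. 'set weight 7 to be max(7, 11)': only ever increase this edge.
    high_edge[j] = max(high_edge[j], high_edge[j - 1])

print(low_edge)   # [5. 5. 4. 4.]
print(high_edge)  # [3. 3. 4. 4.]
```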
The rationale is a + bit more subtle than in the edgeworth case. The basic idea is that since each + trapezoid and monotonicity constraint operates on two weights that are next to + each other (i.e. differ only in the index of one dimension), we can create + a 'square' of points in which one edge goes across the constraint we want to + maintain and the perpendicular edges go across the constraint we are updating. + + For example, consider the 4 weights + + ``` + A -- B + | | + C -- D + ``` + + A/B and C/D differ in the same one index (the constraint we hope to maintain) + while A/C and B/D differ across the conditional index of the trapezoid + constraint we are updating. Say we are focused on whether we maintain A'<=B' + (A' is A after imposing trapezoid trust) and we are operating on the 'min main + feature' side of the lattice so that any updates that occur will lower + weights. If B'=B after trapezoid trust, things are easy because A'<=A by 'min + main feature' and A<=B by the preexisting constraint. If not, and B' 1: + lattice_sizes = lattice_sizes + [int(units)] + if monotonicities: + monotonicities = monotonicities + [0] + + weights = tf.reshape(weights, shape=lattice_sizes) + + weights = _approximately_project_monotonicity(weights, lattice_sizes, + monotonicities) + if edgeworth_trusts or trapezoid_trusts: + weights = _approximately_project_edgeworth(weights, lattice_sizes, + edgeworth_trusts) + weights = _approximately_project_trapezoid(weights, lattice_sizes, + trapezoid_trusts, + edgeworth_trusts) + # Simple capping, applied in a later step, adds less distortion than this + # scaling projection; however, it could violate trust constraints. + weights = _approximately_project_bounds(weights, output_min, output_max) + return tf.reshape(weights, shape=[-1, units]) + + +# TODO: approach used to implement regluarizers is likely to be more +# efficient than one used here. Especially on TPU. Investigate it. +def _project_partial_monotonicity(weights, lattice_sizes, monotonicities, + unimodalities, dimension, constraint_group): + """Applies exact monotonicity projection to a subset of a single dimension. + + Algorithm details: + + In order to project into k constrained dimensions we split all constraints + into 2k sets in such way that within each sets all constraints are + independent. These 2k sets are chosen in such way that for each constrained + dimension we have 2 sets of constraints: even and odd constraints according to + index of smallest vertex in constraint. We apply Dykstra's algorithm to these + sets handling each individual constraint within each set independently. + + This function in particular, then, operates on one of these independent sets, + as defined by a specific dimension and constraint group: 0 for the even + constraints and 1 for the odd constraints. + + Note that in case of just 2 lattice vertices per dimension odd set for that + dimension will be empty. + + * k constrained dimensions projection: + If we know how to project into single constrained dimension then we can use + Dykstra algorithm to project into union of all k constrained dimensions. + + * Single constrained dimension projection: + For single dimension projection we have multiple independent 1-d sequences of + constrained weights of same length. + For example 2 x 6 lattice with monotonicity along 2-nd dimension: + + ``` + 0--<--1--<--2--<--3--<--4--<--5 + | | | | | | + 6--<--7--<--8--<--9--<--10-<--11 + ``` + + we have 2 independent rows of constraints. 
<B, then B' must equal D', the weight below B that was already processed. In
+  that case A' <= C' by construction, C' <= D' because the pair below was
+  already made consistent, and D' = B', so A' <= B' still holds. The same
+  argument applies on the 'max main feature' side with all directions flipped.
+
+  Args:
+    weights: Tensor with weights of lattice layer, with shape lattice_sizes.
+    lattice_sizes: List or tuple of integers which represents lattice sizes
+      which correspond to weights.
+    trapezoid_trusts: Iterable of three-element tuples. First element is the
+      index of the main (monotonic) feature. Second element is the index of
+      the conditional feature. Third element is the direction of trust: 1 if
+      higher values of the conditional feature should increase trust in the
+      main feature and -1 otherwise.
+    edgeworth_trusts: Iterable of edgeworth trust constraints in the same
+      format, used to decide when updates must move whole slabs of weights by
+      the largest violation as described above.
+
+  Returns:
+    Tensor with projected weights matching shape of input weights.
+  """
+  for main_dim, cond_dim, cond_direction in trapezoid_trusts:
+    layers = _unstack_2d(weights, main_dim, cond_dim)
+    if cond_direction < 0:
+      layers = _reverse_second_list_dimension(layers)
+    max_main_dim = lattice_sizes[main_dim] - 1
+    # With a matching edgeworth constraint, all weights behind a grid point
+    # must move together by the largest violation so that the edgeworth
+    # constraint survives the update.
+    same_dims_edgeworth = any(
+        main_dim == other_main and cond_dim == other_cond
+        for other_main, other_cond, _ in edgeworth_trusts)
+    for j in range(1, lattice_sizes[cond_dim]):
+      if same_dims_edgeworth:
+        lhs_violation = tf.maximum(
+            tf.reduce_max(layers[0][j] - layers[0][j - 1]), 0)
+        layers[0][j] -= lhs_violation
+        rhs_violation = tf.maximum(
+            tf.reduce_max(layers[max_main_dim][j - 1] -
+                          layers[max_main_dim][j]), 0)
+        layers[max_main_dim][j] += rhs_violation
+      else:
+        layers[0][j] = tf.minimum(layers[0][j], layers[0][j - 1])
+        layers[max_main_dim][j] = tf.maximum(layers[max_main_dim][j],
+                                             layers[max_main_dim][j - 1])
+    if cond_direction < 0:
+      layers = _reverse_second_list_dimension(layers)
+    weights = _stack_2d(layers, main_dim, cond_dim)
+  return weights
+
+
+def finalize_constraints(weights,
+                         lattice_sizes,
+                         monotonicities,
+                         output_min=None,
+                         output_max=None,
+                         edgeworth_trusts=None,
+                         trapezoid_trusts=None,
+                         units=1):
+  """Approximately projects lattice weights to strictly satisfy constraints.
+
+  Applies the approximate monotonicity, trust and bounds projections defined
+  above. The result strictly satisfies the given constraints but, unlike
+  `project_by_dykstra`, it is not an exact projection in the L2 sense.
+
+  Args:
+    weights: `Lattice` weights tensor of shape: `(prod(lattice_sizes), units)`.
+    lattice_sizes: List or tuple of integers which represents lattice sizes.
+    monotonicities: None or list or tuple of monotonicity constraints per
+      dimension.
+    output_min: None or minimum possible output.
+    output_max: None or maximum possible output.
+    edgeworth_trusts: None or iterable of edgeworth trust constraints.
+    trapezoid_trusts: None or iterable of trapezoid trust constraints.
+    units: Output dimension of the layer.
+
+  Returns:
+    Projected weights tensor of same shape as `weights`.
+  """
+  if units > 1:
It's clear that both rows can be + projected independently. + + To project 1 row, we can again apply Dykstra's algorithm splitting all + constraints into two sets: constraints with odd indices and constraints with + even indices. For example for first row: + - even constraints set: {0 < 1, 2 < 3, 4 < 5} + - odd constraints set: {1 < 2, 3 < 4} + + Within each set no constraints interact with each other so we can project + every individual constraint independently. + + * Individual constraint projection: + Constraint weight[0] <= weight[1]: + - weight[0] = min(weight[0], (weight[0] + weight[1]) / 2) + - weight[1] = max(weight[1], (weight[0] + weight[1]) / 2) + + Differs from _approximately_project_monotonicity in that this algorithm + - Only operates on a single dimension. + - Does not guarantee an satisfying solution to the full monotonicity + constraint. + - Exactly projects (in L2 terms) on the subset of constraints it does + operate on. + + Args: + weights: Tensor with weights of lattice layer, with shape lattice_sizes. + lattice_sizes: List or tuple of integers which represents lattice sizes. + which correspond to weights. + monotonicities: None or list or tuple of same length as lattice_sizes of {0, + 1} which represents monotonicity constraints per dimension. 1 stands for + increasing (non-decreasing in fact), 0 for no monotonicity constraints. + unimodalities: None or list or tuple of same length as lattice_sizes of {0, + 1} which represents unimodality constraints per dimension. 1 stands for + unimodal dimension, 0 for no unimodality constraints. + dimension: Index of feature to which we are applying constraints. + constraint_group: 0 or 1 as defined above, representing whether we are + operating on 'even' or 'odd' constraints. + + Returns: + Tensor with projected weights matching shape of input weights. + + Raises: + ValueError: If provided dimension has no monotonicity or unimodality + constraint associated with it. + """ + + if monotonicities[dimension] == 0 and unimodalities[dimension] == 0: + raise ValueError( + "Trying to project onto unconstrained dimension. Dimension: " % + (dimension)) + + layers = tf.unstack(weights, axis=dimension) + for i in range(constraint_group, lattice_sizes[dimension] - 1, 2): + # Project individual independent constraints. + average = (layers[i] + layers[i + 1]) / 2.0 + if (monotonicities[dimension] == 1 or + (unimodalities[dimension] == 1 and i >= lattice_sizes[dimension] // 2)): + layers[i] = tf.minimum(layers[i], average) + layers[i + 1] = tf.maximum(layers[i + 1], average) + else: + layers[i] = tf.maximum(layers[i], average) + layers[i + 1] = tf.minimum(layers[i + 1], average) + + return tf.stack(layers, axis=dimension) + + +def _project_partial_edgeworth(weights, lattice_sizes, edgeworth_trust, + constraint_group): + """Applies exact edgeworth trust projection to a subset of one constraint. + + Algorithm details: + + For the Edgeworth trust projection, we follow a similar approach to the + monotonicity projection by splitting up the constraints into independent sets. + Here, each trust constraint touches every lattice vertex, but can be broken up + into 4 independent sets of constraints, based on whether the constraint's + smaller indices along the main and conditional dimensions are even or odd. + That leaves us with 4t sets of constraints if we have t trust constraints, + which we can sequentially project onto with the Dykstra's algorithm. 
+ + This function applies to a single set of independent constraints within a + single trust constraint. The constraint group can take the value (0,0), (0,1), + (1,0), or (1,1) corresponding to even (0) or odd (1) for the main and + conditional dimensions, respectively. + + * k trust constraints projection: + If we know how to project into single trust constraint then we can use + Dykstra algorithm to project into union of all k trust constraints. + + * Single trust constraint projection: + Edgeworth constraints require the difference in weights across the main + feature to be larger when the conditional feature is higher. We can think of + this as separate constraints applied to each 'square' of weights {(i,j,...), + (i+1,j,...), (i,j+1,...), (i+1,j+1,...), where i and j denote the index + dimensions of the main and conditional features and the ellipses represent + a fixed value of the other feature dimensions. It is immediately clear that + we can apply the constraint at the same time for different values of the + other dimensions. Considering then a fixed slice, and a grid + + ``` + 0---1---2---3 + | | | | + 4---5---6---7 + | | | | + 8---9---10--11 + | | | | + 12--13--14--15 + ``` + + we get our four independent sets by considering non-overlapping squares of + constraints. In particular, we define the sets by the combination of even & + odd starting indices in each dimension. So if we start our indexing at the + top-left, the even/even set would be the four squares {0,1,4,5}, {2,3,6,7}, + {8,9,12,13}, and {10,11,14,15}, the even/odd set would be {4,5,8,9} and + {6,7,10,11} and so on. + + * Individual weight projection: + Within each square the projection moves each of the four weights by the + constraint violation / 4, if necessary, increasing the gap between high-trust + weights across the main feature and decreasing the gap between low-trust + weights across the main feature. + + Differs from _approximately_project_edgeworth in that this algorithm + - Only operates on the constraints for a single (main_dim, cond_dim) pair. + - Does not guarantee a satisfying solution to the full trust constraint. + - Exactly projects (in L2 terms) on the subset of constraints it does + operate on. + + Args: + weights: Tensor with weights of lattice layer, with shape lattice_sizes. + lattice_sizes: List or tuple of integers which represents lattice sizes. + which correspond to weights. + edgeworth_trust: Three-element tuple representing a single trust constraint. + First element is the index of the main (monotonic) feature. Second element + is the index of the conditional feature. Third element is the direction of + trust set to 1 if higher values of the conditional feature increase trust + and -1 otherwise. + constraint_group: Two-element tuple of 0s and 1s as defined above, + representing the combination of 'even' and 'odd' constraints we are + projecting on. + + Returns: + Tensor with projected weights matching shape of input weights. 
+ """ + + main_dim, cond_dim, cond_direction = edgeworth_trust + layers = _unstack_2d(weights, main_dim, cond_dim) + + if cond_direction < 0: + layers = _reverse_second_list_dimension(layers) + for i in range(constraint_group[0], lattice_sizes[main_dim] - 1, 2): + for j in range(constraint_group[1], lattice_sizes[cond_dim] - 1, 2): + difference_in_slopes = ((layers[i + 1][j] - layers[i][j]) - + (layers[i + 1][j + 1] - layers[i][j + 1])) + correction = tf.maximum(difference_in_slopes / 4, 0) + layers[i][j] += correction + layers[i][j + 1] -= correction + layers[i + 1][j] -= correction + layers[i + 1][j + 1] += correction + if cond_direction < 0: + layers = _reverse_second_list_dimension(layers) + + return _stack_2d(layers, main_dim, cond_dim) + + +def _project_partial_trapezoid(weights, lattice_sizes, trapezoid_trust, + constraint_group): + """Applies exact trapezoid trust projection to a subset of one constraint. + + Algorithm details: + + For the trapezoid trust projection, each trust constraint touches every + lattice vertex, but can be broken up into 2 independent sets of constraints, + based on whether the constraint's smaller index along the conditional + dimension is even or odd. That leaves us with 2t sets of constraints if we + have t trust constraints, which we can sequentially project onto with the + Dykstra algorithm. + + This function applies to a single set of independent constraints within a + single trust constraint. The constraint group can take the value 0 or 1, + corresponding to even (0) or odd (1) for conditional dimension index. + + * k trust constraints projection: + If we know how to project into single trust constraint then we can use + Dykstra algorithm to project into union of all k trust constraints. + + * Single trust constraint projection: + Trapezoid constraints require the range of possible model outputs across the + main feature to be larger when the conditional feature demonstrates higher + trust in the main feature. That is, they constrain the 'extreme' (minimum and + maximum) weights in the main feature dimension but not any of the weights in + the middle if the lattice size is larger than 2. We therefore have one set of + constraints along the conditional dimension when the main feature is at its + minimum and one when the main feature is at its maximum. For example, consider + the grid + + ``` + 0---1---2---3 + | | | | + 4---5---6---7 + | | | | + 8---9---10--11 + | | | | + 12--13--14--15 + ``` + + If the main feature is on the x-axis and the conditional feature is on the y- + axis in this grid, our constraints operate on {0,4,8,12} and {3,7,11,15}. In + fact, those constraints are simply monotonicity constraints in opposite + directions. If the cond_direction = 1, we are monotonically decreasing between + 12 and 0 (0 < 4 < 8 < 12) and monotonically increasing between 15 and 3 + (3 > 7 > 11 > 15). Note that these imply that [0,3] is a superset of [4,7] and + so on down to the smallest subset [12,15]. Our two independent sets of these + constraints match those for monotonicity based on even and odd indices. For + example, [8 < 12], [4 < 0], [11 > 15], and [3 > 7] can be projected onto at + once, while [4 < 8] and [7 > 11] are in the other group. All constraint + directions are flipped if cond_direction = -1. + + * Individual weight projection: + For each pair of constraints, we project as in monotonicity: each weight moves + halfway towards each other if the constraint is being violated, and stays the + same otherwise. 
+
+  Differs from _approximately_project_trapezoid in that this algorithm
+  - Only operates on the constraints for a single (main_dim, cond_dim) pair.
+  - Does not guarantee a satisfying solution to the full trust constraint.
+  - Exactly projects (in L2 terms) on the subset of constraints it does
+    operate on.
+
+  Args:
+    weights: Tensor with weights of lattice layer, with shape lattice_sizes.
+    lattice_sizes: List or tuple of integers which represents lattice sizes
+      which correspond to weights.
+    trapezoid_trust: Three-element tuple representing a single trust
+      constraint. First element is the index of the main (monotonic) feature.
+      Second element is the index of the conditional feature. Third element
+      is the direction of trust set to 1 if higher values of the conditional
+      feature increase trust and -1 otherwise.
+    constraint_group: 0 or 1 as defined above, representing whether we are
+      acting on even or odd indices.
+
+  Returns:
+    Tensor with projected weights matching shape of input weights.
+  """
+
+  main_dim, cond_dim, cond_direction = trapezoid_trust
+  layers = _unstack_2d(weights, main_dim, cond_dim)
+
+  max_main_dim = lattice_sizes[main_dim] - 1
+  if cond_direction < 0:
+    layers = _reverse_second_list_dimension(layers)
+  for j in range(constraint_group, lattice_sizes[cond_dim] - 1, 2):
+    lhs_difference = layers[0][j + 1] - layers[0][j]
+    lhs_correction = tf.maximum(lhs_difference / 2, 0)
+    layers[0][j] += lhs_correction
+    layers[0][j + 1] -= lhs_correction
+
+    rhs_difference = layers[max_main_dim][j] - layers[max_main_dim][j + 1]
+    rhs_correction = tf.maximum(rhs_difference / 2, 0)
+    layers[max_main_dim][j] -= rhs_correction
+    layers[max_main_dim][j + 1] += rhs_correction
+  if cond_direction < 0:
+    layers = _reverse_second_list_dimension(layers)
+
+  return _stack_2d(layers, main_dim, cond_dim)
+
+
+def _project_partial_monotonic_dominance(weights, lattice_sizes,
+                                         monotonic_dominance, constraint_group):
+  r"""Applies exact monotonic dominance projection to given constraint group.
+
+  Algorithm details:
+
+  For the monotonic dominance projection, we follow a similar approach to the
+  monotonicity projection by splitting up the constraints into independent
+  sets. Here, each dominance constraint can be broken up into 8 independent
+  sets of constraints, based on (1) whether the constraint's smaller indices
+  along the dominant and weak dimensions are even or odd and (2) two triplets
+  of vertices to consider for each square in the grid shown below.
+
+  That leaves us with 8k sets of constraints if we have k dominance
+  constraints, which we can sequentially project onto with the Dykstra
+  algorithm.
+
+  This function applies to a single set of independent constraints within a
+  single dominance constraint group. The constraint group can take the value
+  {0,1} x {0,1} x {0,1}. Even (0) or odd (1) of the first two elements
+  correspond to the dominant and weak features and the third element
+  determines which of the two triplets within a square to consider.
+
+  * k monotonic dominance constraints projection:
+  If we know how to project into a single monotonic dominance constraint then
+  we can use Dykstra's algorithm to project into the union of all k dominance
+  constraints.
+
+  * Single monotonic dominance constraint projection
+  Monotonic dominance constraints require the effect (slope) in the direction
+  of the dominant dimension to be greater than that of the weak dimension for
+  any point in the lattice.
+  We can think of this as separate constraints applied to each 'triangle' of
+  weights represented as either {(i,j,...), (i+1,j,...), (i+1,j+1,...)} or
+  {(i,j,...), (i,j+1,...), (i+1,j+1,...)} where i and j denote the index
+  dimensions of the dominant and weak features and the ellipses represent a
+  fixed value of the other feature dimensions. Considering then a fixed
+  slice, and a grid
+
+  ```
+  0---1---2---3
+  | \ | \ | \ |
+  4---5---6---7
+  | \ | \ | \ |
+  8---9---10--11
+  | \ | \ | \ |
+  12--13--14--15
+  ```
+
+  where the dominant feature is on the x-axis and the weak feature is on the
+  y-axis, we get our 8 independent sets of non-overlapping triangular
+  triplets of vertices. For example, one set consists of {(0,1,4), (8,9,12),
+  (2,3,6), (10,11,14)}.
+
+  * Individual weight projection
+  Within each triangular triplet, the projection moves the weight of the
+  right angled vertex, either top-right or bottom-left, by 2 * violation / 3
+  and the other two vertices by violation / 3 to satisfy the constraint while
+  minimizing the L2 distance from the initial point.
+
+  Args:
+    weights: tensor with weights of lattice layer, with shape lattice_sizes.
+    lattice_sizes: list or tuple of integers which represents lattice sizes
+      which correspond to weights.
+    monotonic_dominance: two-element tuple representing a single monotonic
+      dominance constraint. First element is the index of the dominant
+      feature. Second element is the index of the weak feature.
+    constraint_group: three-element tuple as defined above, representing
+      'even' or 'odd' indices and which of the two triangles we are acting on.
+
+  Returns:
+    Tensor with projected weights matching shape of input weights.
+  """
+
+  dominant_dim, weak_dim = monotonic_dominance
+  layers = _unstack_2d(weights, dominant_dim, weak_dim)
+  for i in range(constraint_group[0], lattice_sizes[dominant_dim] - 1, 2):
+    for j in range(constraint_group[1], lattice_sizes[weak_dim] - 1, 2):
+      midpoint = (layers[i][j] + layers[i + 1][j + 1]) / 2
+      if constraint_group[2] == 1:
+        difference = midpoint - layers[i + 1][j]
+        correction = tf.maximum(difference / 3, 0)
+        layers[i + 1][j] += 2 * correction
+      else:
+        difference = midpoint - layers[i][j + 1]
+        correction = tf.minimum(difference / 3, 0)
+        layers[i][j + 1] += 2 * correction
+      layers[i][j] -= correction
+      layers[i + 1][j + 1] -= correction
+
+  return _stack_2d(layers, dominant_dim, weak_dim)
+
+
+def _project_partial_joint_monotonicity(weights, lattice_sizes,
+                                        joint_monotonicity, constraint_group):
+  """Applies exact joint monotonicity projection to given constraint group.
+
+  Algorithm details:
+
+  For the joint monotonicity projection, we follow a similar approach to the
+  per-dimension monotonicity projection by splitting up the constraints into
+  independent sets. Here, each joint monotonicity constraint can be broken up
+  into 8 independent sets of constraints, based on (1) whether the
+  constraint's smaller indices along the two given dimensions are even or odd
+  and (2) two triplets of vertices to consider for each square in the grid
+  shown below.
+
+  That leaves us with 8k sets of constraints if we have k joint monotonicity
+  constraints, which we can sequentially project onto with the Dykstra
+  algorithm.
+
+  This function applies to a single set of independent constraints within a
+  single joint monotonicity constraint. The constraint group can take the
+  value {0,1} x {0,1} x {0,1}.
+  Even (0) or odd (1) of the first two elements correspond to the two
+  features that are jointly monotonic and the third element determines which
+  of the two triplets within a square to consider.
+
+  * k joint monotonicity constraints projection:
+  If we know how to project into a single joint monotonicity constraint then
+  we can use Dykstra's algorithm to project into the union of all k joint
+  monotonicity constraints.
+
+  * Single joint monotonicity constraint projection
+  Joint monotonicity constraints require the function to be monotonic along a
+  diagonal direction of a two-feature subspace, ceteris paribus all other
+  features. The sum of the partial derivatives on the constraint features
+  needs to be non-negative. We can think of this as separate constraints
+  applied to each 'triangle' of weights represented as either {(i,j,...),
+  (i+1,j,...), (i,j+1,...)} or {(i+1,j+1,...), (i+1,j,...), (i,j+1,...)}
+  where i and j denote the index dimensions of the two features and the
+  ellipses represent a fixed value of the other feature dimensions.
+  Considering then a fixed slice, and a grid
+
+  ```
+  0---1---2---3
+  | / | / | / |
+  4---5---6---7
+  | / | / | / |
+  8---9---10--11
+  | / | / | / |
+  12--13--14--15
+  ```
+
+  we get our 8 independent sets of non-overlapping triangular triplets of
+  vertices. For example, one set consists of {(0,1,4), (8,9,12), (2,3,6),
+  (10,11,14)}.
+
+  * Individual weight projection
+  Within each triangular triplet, the projection moves the weight of the
+  right angled vertex, either top-left or bottom-right, by 2 * violation / 3
+  and the other two vertices by violation / 3 to satisfy the constraint while
+  minimizing the L2 distance from the initial point.
+
+  Args:
+    weights: tensor with weights of lattice layer, with shape lattice_sizes.
+    lattice_sizes: list or tuple of integers which represents lattice sizes
+      which correspond to weights.
+    joint_monotonicity: two-element tuple representing a single joint
+      monotonicity constraint. The two elements are the indices of the two
+      constrained features.
+    constraint_group: three-element tuple as defined above, representing the
+      combination of 'even' and 'odd' constraints we are projecting on.
+
+  Returns:
+    Tensor with projected weights matching shape of input weights.
+  """
+
+  dim1, dim2 = joint_monotonicity
+  layers = _unstack_2d(weights, dim1, dim2)
+  for i in range(constraint_group[0], lattice_sizes[dim1] - 1, 2):
+    for j in range(constraint_group[1], lattice_sizes[dim2] - 1, 2):
+      midpoint = (layers[i + 1][j] + layers[i][j + 1]) / 2
+      if constraint_group[2] == 1:
+        difference = midpoint - layers[i + 1][j + 1]
+        correction = tf.maximum(difference / 3, 0)
+        layers[i + 1][j + 1] += 2 * correction
+      else:
+        difference = midpoint - layers[i][j]
+        correction = tf.minimum(difference / 3, 0)
+        layers[i][j] += 2 * correction
+      layers[i + 1][j] -= correction
+      layers[i][j + 1] -= correction
+
+  return _stack_2d(layers, dim1, dim2)
+
+
+# TODO: Test whether adding min/max capping to dykstra projection would
+# improve performance.
+def project_by_dykstra(weights,
+                       lattice_sizes,
+                       monotonicities=None,
+                       unimodalities=None,
+                       edgeworth_trusts=None,
+                       trapezoid_trusts=None,
+                       monotonic_dominances=None,
+                       joint_monotonicities=None,
+                       num_iterations=1):
+  """Applies Dykstra's projection algorithm for monotonicity/trust constraints.
+
+  - Returns honest projection with respect to L2 norm if num_iterations is
+    inf.
+  - Monotonicity will be violated by some small eps(num_iterations).
+  - Complexity: O(num_iterations * (num_monotonic_dims + num_trust_constraints)
+    * num_lattice_weights)
+
+  Dykstra's alternating projections algorithm projects into the intersection
+  of several convex sets. For a description of the algorithm itself see:
+  https://en.wikipedia.org/wiki/Dykstra%27s_projection_algorithm
+
+  Here, each monotonicity constraint is split up into 2 independent convex
+  sets and each trust constraint is split up into 4 independent convex sets.
+  These sets are then projected onto exactly (in L2 space). For more details,
+  see the _project_partial_* functions.
+
+  Args:
+    weights: `Lattice` weights tensor of shape: `(prod(lattice_sizes), units)`.
+    lattice_sizes: list or tuple of integers which represents lattice sizes
+      which correspond to weights.
+    monotonicities: None or list or tuple of same length as lattice_sizes of
+      {0, 1} which represents monotonicity constraints per dimension. 1
+      stands for increasing (non-decreasing in fact), 0 for no monotonicity
+      constraints.
+    unimodalities: None or list or tuple of same length as lattice_sizes of
+      {0, 1} which represents unimodality constraints per dimension. 1 stands
+      for unimodal dimension, 0 for no unimodality constraints.
+    edgeworth_trusts: None or iterable of three-element tuples. First element
+      is the index of the main (monotonic) feature. Second element is the
+      index of the conditional feature. Third element is the direction of
+      trust: 1 if higher values of the conditional feature should increase
+      trust in the main feature and -1 otherwise.
+    trapezoid_trusts: None or iterable of three-element tuples. First element
+      is the index of the main (monotonic) feature. Second element is the
+      index of the conditional feature. Third element is the direction of
+      trust: 1 if higher values of the conditional feature should increase
+      trust in the main feature and -1 otherwise.
+    monotonic_dominances: None or iterable of two-element tuples. First
+      element is the index of the dominant feature. Second element is the
+      index of the weak feature.
+    joint_monotonicities: None or iterable of two-element tuples. Each tuple
+      represents a pair of feature indices that require joint monotonicity.
+    num_iterations: number of iterations of Dykstra's algorithm.
+
+  Returns:
+    Projected weights tensor of same shape as `weights`.
+  """
+  if ((count_non_zeros(monotonicities, unimodalities) == 0 and
+       not joint_monotonicities) or
+      num_iterations == 0):
+    return weights
+
+  units = weights.shape[1]
+  if monotonicities is None:
+    monotonicities = [0] * len(lattice_sizes)
+  if unimodalities is None:
+    unimodalities = [0] * len(lattice_sizes)
+  if edgeworth_trusts is None:
+    edgeworth_trusts = []
+  if trapezoid_trusts is None:
+    trapezoid_trusts = []
+  if monotonic_dominances is None:
+    monotonic_dominances = []
+  if joint_monotonicities is None:
+    joint_monotonicities = []
+  if units > 1:
+    lattice_sizes = lattice_sizes + [int(units)]
+    monotonicities = monotonicities + [0]
+    unimodalities = unimodalities + [0]
+
+  weights = tf.reshape(weights, lattice_sizes)
+
+  def body(iteration, weights, last_change):
+    """Body of the tf.while_loop for Dykstra's projection algorithm.
+
+    This implements Dykstra's projection algorithm and requires rolling back
+    the last projection change.
+
+    Args:
+      iteration: Iteration counter tensor.
+      weights: Tensor with projected weights at each iteration.
+      last_change: Dict that stores the last change in the weights after
+        projecting onto each subset of constraints.
+ + Returns: + The tuple (iteration, weights, last_change) at the end of each iteration. + """ + last_change = copy.copy(last_change) + for dim in range(len(lattice_sizes)): + if monotonicities[dim] == 0 and unimodalities[dim] == 0: + continue + + for constraint_group in [0, 1]: + # Iterate over 2 sets of constraints per dimension: even and odd. + # Odd set exists only when there are more than 2 lattice vertices. + if constraint_group + 1 >= lattice_sizes[dim]: + continue + + # Rolling back last projection into current set as required by Dykstra's + # algorithm. + rolled_back_weights = weights - last_change[("MONOTONICITY", dim, + constraint_group)] + weights = _project_partial_monotonicity(rolled_back_weights, + lattice_sizes, monotonicities, + unimodalities, dim, + constraint_group) + last_change[("MONOTONICITY", dim, + constraint_group)] = weights - rolled_back_weights + + for constraint in edgeworth_trusts: + main_dim, cond_dim, _ = constraint + for constraint_group in [(0, 0), (0, 1), (1, 0), (1, 1)]: + if (constraint_group[0] >= lattice_sizes[main_dim] - 1 or + constraint_group[1] >= lattice_sizes[cond_dim] - 1): + continue + + rolled_back_weights = ( + weights - last_change[("EDGEWORTH", constraint, constraint_group)]) + weights = _project_partial_edgeworth(rolled_back_weights, lattice_sizes, + constraint, constraint_group) + last_change[("EDGEWORTH", constraint, + constraint_group)] = weights - rolled_back_weights + + for constraint in trapezoid_trusts: + _, cond_dim, _ = constraint + for constraint_group in [0, 1]: + if constraint_group >= lattice_sizes[cond_dim] - 1: + continue + + rolled_back_weights = ( + weights - last_change[("TRAPEZOID", constraint, constraint_group)]) + weights = _project_partial_trapezoid(rolled_back_weights, lattice_sizes, + constraint, constraint_group) + last_change[("TRAPEZOID", constraint, + constraint_group)] = weights - rolled_back_weights + + for constraint in monotonic_dominances: + dominant_dim, weak_dim = constraint + for constraint_group in itertools.product([0, 1], [0, 1], [0, 1]): + if (constraint_group[0] >= lattice_sizes[dominant_dim] - 1 or + constraint_group[1] >= lattice_sizes[weak_dim] - 1): + continue + + rolled_back_weights = weights - last_change[("MONOTONIC_DOMINANCE", + constraint, + constraint_group)] + weights = _project_partial_monotonic_dominance(rolled_back_weights, + lattice_sizes, + constraint, + constraint_group) + last_change[("MONOTONIC_DOMINANCE", constraint, + constraint_group)] = weights - rolled_back_weights + + for constraint in joint_monotonicities: + dim1, dim2 = constraint + for constraint_group in itertools.product([0, 1], [0, 1], [0, 1]): + if (constraint_group[0] >= lattice_sizes[dim1] - 1 or + constraint_group[1] >= lattice_sizes[dim2] - 1): + continue + + rolled_back_weights = weights - last_change[("JOINT_MONOTONICITY", + constraint, + constraint_group)] + weights = _project_partial_joint_monotonicity(rolled_back_weights, + lattice_sizes, constraint, + constraint_group) + last_change[("JOINT_MONOTONICITY", constraint, + constraint_group)] = weights - rolled_back_weights + return iteration + 1, weights, last_change + + def cond(iteration, weights, last_change): + del weights, last_change + return tf.less(iteration, num_iterations) + + # Run the body of the loop once to find required last_change keys. The set of + # keys in the input and output of the body of tf.while_loop must be the same. + # The resulting ops are discarded and will not be part of the TF graph. 
+ zeros = tf.zeros(shape=lattice_sizes, dtype=weights.dtype) + last_change = collections.defaultdict(lambda: zeros) + (_, _, last_change) = body(0, weights, last_change) + + # Apply Dykstra's algorithm with tf.while_loop. + iteration = tf.constant(0) + last_change = {k: zeros for k in last_change} + (_, weights, _) = tf.while_loop(cond, body, (iteration, weights, last_change)) + return tf.reshape(weights, shape=[-1, units]) + + +def laplacian_regularizer(weights, lattice_sizes, l1=0.0, l2=0.0): + """Returns Laplacian regularization loss for `Lattice` layer. + + Laplacian regularizer penalizes the difference between adjacent vertices in + multi-cell lattice (see + [publication](http://jmlr.org/papers/v17/15-243.html)). + + Consider a 3 x 2 lattice with weights `w`: + + ``` + w[3]-----w[4]-----w[5] + | | | + | | | + w[0]-----w[1]-----w[2] + ``` + + where the number at each node represents the weight index. + In this case, the laplacian regularizer is defined as: + + ``` + l1[0] * (|w[1] - w[0]| + |w[2] - w[1]| + + |w[4] - w[3]| + |w[5] - w[4]|) + + l1[1] * (|w[3] - w[0]| + |w[4] - w[1]| + |w[5] - w[2]|) + + + l2[0] * ((w[1] - w[0])^2 + (w[2] - w[1])^2 + + (w[4] - w[3])^2 + (w[5] - w[4])^2) + + l2[1] * ((w[3] - w[0])^2 + (w[4] - w[1])^2 + (w[5] - w[2])^2) + ``` + + Arguments: + weights: `Lattice` weights tensor of shape: `(prod(lattice_sizes), units)`. + lattice_sizes: List or tuple of integers which represents lattice sizes. + l1: l1 regularization amount. Either single float or list or tuple of floats + to specify different regularization amount per dimension. + l2: l2 regularization amount. Either single float or list or tuple of floats + to specify different regularization amount per dimension. + + Returns: + Laplacian regularization loss. + """ + if not l1 and not l2: + return 0.0 + + rank = len(lattice_sizes) + # If regularization amount is given as single float assume same amount for + # every dimension. + if l1 and not isinstance(l1, (list, tuple)): + l1 = [l1] * rank + if l2 and not isinstance(l2, (list, tuple)): + l2 = [l2] * rank + + if weights.shape[1] > 1: + lattice_sizes = lattice_sizes + [int(weights.shape[1])] + rank += 1 + if l1: + l1 = l1 + [0.0] + if l2: + l2 = l2 + [0.0] + weights = tf.reshape(weights, shape=lattice_sizes) + + result = tf.constant(0.0, shape=[], dtype=weights.dtype) + for dim in range(rank): + if (not l1 or not l1[dim]) and (not l2 or not l2[dim]): + continue + if dim > 0: + # Transpose so current dimension becomes first one in order to simplify + # indexing and be able to merge all other dimensions into 1 for better TPU + # performance. + permut = [p for p in range(rank)] + permut[0], permut[dim] = permut[dim], permut[0] + slices = tf.transpose(weights, perm=permut) + else: + slices = weights + slices = tf.reshape(slices, shape=[lattice_sizes[dim], -1]) + + diff = slices[1:] - slices[0:-1] + if l1: + result += tf.reduce_sum(tf.abs(diff)) * l1[dim] + if l2: + result += tf.reduce_sum(tf.square(diff)) * l2[dim] + return result + + +def torsion_regularizer(weights, lattice_sizes, l1=0.0, l2=0.0): + """Returns Torsion regularization loss for `Lattice` layer. + + Lattice torsion regularizer penalizes how much the lattice function twists + from side-to-side (see + [publication](http://jmlr.org/papers/v17/15-243.html)). 
+
+  Consider a 3 x 2 lattice with weights `w`:
+
+  ```
+  w[3]-----w[4]-----w[5]
+    |        |        |
+    |        |        |
+  w[0]-----w[1]-----w[2]
+  ```
+
+  In this case, the torsion regularizer is defined as:
+
+  ```
+  l1 * (|w[4] + w[0] - w[3] - w[1]| + |w[5] + w[1] - w[4] - w[2]|) +
+  l2 * ((w[4] + w[0] - w[3] - w[1])^2 + (w[5] + w[1] - w[4] - w[2])^2)
+  ```
+
+  Arguments:
+    weights: `Lattice` weights tensor of shape: `(prod(lattice_sizes), units)`.
+    lattice_sizes: List or tuple of integers which represents lattice sizes.
+    l2: l1 regularization amount. Either single float or list or tuple of
+      floats to specify different regularization amount per dimension.
+    l2: l2 regularization amount. Either single float or list or tuple of
+      floats to specify different regularization amount per dimension. The
+      amount for the interaction term between i and j is the corresponding
+      product of each per feature amount.
+
+  Returns:
+    Torsion regularization loss.
+  """
+  rank = len(lattice_sizes)
+  if rank == 1 or (not l1 and not l2):
+    return 0.0
+
+  # If regularization amount is given as single float assume same amount for
+  # every dimension.
+  if l1 and not isinstance(l1, (list, tuple)):
+    l1 = [math.sqrt(l1)] * rank
+  if l2 and not isinstance(l2, (list, tuple)):
+    l2 = [math.sqrt(l2)] * rank
+
+  if weights.shape[1] > 1:
+    lattice_sizes = lattice_sizes + [int(weights.shape[1])]
+    rank += 1
+    if l1:
+      l1 = l1 + [0.0]
+    if l2:
+      l2 = l2 + [0.0]
+  weights = tf.reshape(weights, shape=lattice_sizes)
+
+  result = tf.constant(0.0, shape=[], dtype=weights.dtype)
+  for i in range(rank - 1):
+    for j in range(i + 1, rank):
+      if ((not l1 or not l1[i] or not l1[j]) and
+          (not l2 or not l2[i] or not l2[j])):
+        continue
+      if j == 1:
+        planes = weights
+      else:
+        # Transpose so dimensions i and j become first in order to simplify
+        # indexing and be able to merge all other dimensions into 1 for better
+        # TPU performance.
+        permut = [p for p in range(rank)]
+        permut[0], permut[i] = permut[i], permut[0]
+        permut[1], permut[j] = permut[j], permut[1]
+        planes = tf.transpose(weights, perm=permut)
+        planes = tf.reshape(
+            planes, shape=[lattice_sizes[i], lattice_sizes[j], -1])
+
+      a00 = planes[0:-1, 0:-1]
+      a01 = planes[0:-1, 1:]
+      a10 = planes[1:, 0:-1]
+      a11 = planes[1:, 1:]
+      torsion = a00 + a11 - a01 - a10
+
+      if l1:
+        result += tf.reduce_sum(tf.abs(torsion)) * l1[i] * l1[j]
+      if l2:
+        result += tf.reduce_sum(tf.square(torsion)) * l2[i] * l2[j]
+  return result
+
+
+def verify_hyperparameters(lattice_sizes,
+                           units=None,
+                           weights_shape=None,
+                           input_shape=None,
+                           monotonicities=None,
+                           unimodalities=None,
+                           edgeworth_trusts=None,
+                           trapezoid_trusts=None,
+                           monotonic_dominances=None,
+                           joint_monotonicities=None,
+                           output_min=None,
+                           output_max=None,
+                           regularization_amount=None,
+                           regularization_info=""):
+  """Verifies that all given hyperparameters are consistent.
+
+  This function does not inspect weights themselves. Only their shape. Use
+  `assert_constraints()` to assert actual weights against constraints.
+
+  See `tfl.lattice_layer.Lattice` class level comment for detailed
+  description of arguments.
+
+  Args:
+    lattice_sizes: Lattice sizes to check against.
+    units: Units hyperparameter of `Lattice` layer.
+    weights_shape: Shape of tensor which represents `Lattice` layer weights.
+    input_shape: Shape of layer input. Useful only if `units` is set.
+    monotonicities: Monotonicities hyperparameter of `Lattice` layer.
+    unimodalities: Unimodalities hyperparameter of `Lattice` layer.
+    edgeworth_trusts: Edgeworth_trusts hyperparameter of `Lattice` layer.
+    trapezoid_trusts: Trapezoid_trusts hyperparameter of `Lattice` layer.
+    monotonic_dominances: Monotonic dominances hyperparameter of `Lattice`
+      layer.
+    joint_monotonicities: Joint monotonicities hyperparameter of `Lattice`
+      layer.
+    output_min: Minimum output of `Lattice` layer.
+    output_max: Maximum output of `Lattice` layer.
+    regularization_amount: Regularization amount for regularizers.
+    regularization_info: String which describes `regularization_amount`.
+
+  Raises:
+    ValueError: If something is inconsistent.
+  """
+  for size in lattice_sizes:
+    if size < 2:
+      raise ValueError("All lattice sizes must be at least 2. Given: %s" %
+                       lattice_sizes)
+
+  # It also raises errors if monotonicities specified incorrectly.
+  monotonicities = canonicalize_monotonicities(monotonicities)
+  if monotonicities is not None:
+    if len(monotonicities) != len(lattice_sizes):
+      raise ValueError("If provided 'monotonicities' should have same number "
+                       "of elements as 'lattice_sizes'. 'monotonicities': %s, "
+                       "'lattice_sizes': %s" % (monotonicities, lattice_sizes))
+
+  unimodalities = canonicalize_unimodalities(unimodalities)
+  if unimodalities is not None:
+    if len(unimodalities) != len(lattice_sizes):
+      raise ValueError("If provided 'unimodalities' should have same number "
+                       "of elements as 'lattice_sizes'. 'unimodalities': %s, "
+                       "'lattice_sizes': %s" % (unimodalities, lattice_sizes))
+    for unimodality, dim_size in zip(unimodalities, lattice_sizes):
+      if unimodality == 1 and dim_size < 3:
+        raise ValueError("Unimodal dimensions must have lattice size at "
+                         "least 3. unimodalities: %s, lattice_sizes: %s" %
+                         (unimodalities, lattice_sizes))
+
+  if monotonicities is not None and unimodalities is not None:
+    for i, (monotonicity,
+            unimodality) in enumerate(zip(monotonicities, unimodalities)):
+      if monotonicity != 0 and unimodality != 0:
+        raise ValueError("Both monotonicity and unimodality cannot be set "
+                         "simultaneously for the same dimension. "
+                         "Dimension: %d, 'monotonicities': %s, "
+                         "'unimodalities': %s" %
+                         (i, monotonicities, unimodalities))
+
+  all_trusts = canonicalize_trust(
+      (edgeworth_trusts or []) + (trapezoid_trusts or [])) or []
+  main_dims, cond_dims, trapezoid_cond_dims = set(), set(), set()
+  dim_pairs_direction = {}
+  for i, constraint in enumerate(all_trusts):
+    main_dim, cond_dim, cond_direction = constraint
+    if (main_dim >= len(lattice_sizes) or cond_dim >= len(lattice_sizes) or
+        main_dim < 0 or cond_dim < 0):
+      raise ValueError("Dimensions constrained by trust constraints "
+                       "are not within the range of the lattice. "
+                       "'trust_dims': %s, %s, num_dims: %s" %
+                       (main_dim, cond_dim, len(lattice_sizes)))
+    if not isinstance(main_dim, int) or not isinstance(cond_dim, int):
+      raise ValueError("Trust constraint dimensions must be integers. Seeing "
+                       "main_dim %s and cond_dim %s" % (main_dim, cond_dim))
+    if monotonicities[main_dim] != 1:
+      raise ValueError("Trust constraint's main feature must be "
+                       "monotonic. Dimension %s is not monotonic." %
+                       (main_dim))
+    if (main_dim, cond_dim) in dim_pairs_direction and dim_pairs_direction[
+        (main_dim, cond_dim)] != cond_direction:
+      raise ValueError("Cannot have two trust constraints on the same pair "
+                       "of features in opposite directions. Features: %d, %d"
+                       % (main_dim, cond_dim))
+    # Only apply this check to trapezoid constraints when there are also
+    # edgeworth constraints.
+ if edgeworth_trusts and i >= len(edgeworth_trusts): + if cond_dim in trapezoid_cond_dims: + logging.warning( + "Conditional dimension %d is being used in multiple trapezoid " + "trust constraints. Because of this and the presence of edgeworth " + "constraints, there may be slight trust violations of one or more " + "of these constraints at the end of training. Consider increasing " + "num_projection_iterations to reduce violation.", cond_dim) + trapezoid_cond_dims.add(cond_dim) + main_dims.add(main_dim) + cond_dims.add(cond_dim) + dim_pairs_direction[(main_dim, cond_dim)] = cond_direction + main_and_cond = main_dims.intersection(cond_dims) + if main_and_cond: + raise ValueError("A feature cannot be both a main feature and a " + "conditional feature in trust constraints. " + "Seeing dimension %d in both" % (main_and_cond.pop())) + + if monotonic_dominances is not None: + dim_pairs = set([]) + for i, constraint in enumerate(monotonic_dominances): + if len(constraint) != 2: + raise ValueError("Monotonic dominance constraints must consist of 2 " + "elements. Seeing constraint tuple %s" % (constraint,)) + dominant_dim, weak_dim = constraint + if (dominant_dim >= len(lattice_sizes) or + weak_dim >= len(lattice_sizes) or + dominant_dim < 0 or weak_dim < 0): + raise ValueError("Dimensions constrained by monotonic dominance " + "constraints are not within the range of the lattice. " + "'dims': %s, %s, num_dims: %s" % + (dominant_dim, weak_dim, len(lattice_sizes))) + if not isinstance(dominant_dim, int) or not isinstance(weak_dim, int): + raise ValueError("Monotonic dominance constraint dimensions must be " + "integers. Seeing dominant_dim %s and weak_dim %s" % + (dominant_dim, weak_dim)) + for dim in [dominant_dim, weak_dim]: + if monotonicities[dim] != 1: + raise ValueError("Monotonic dominance constraint's features must be " + "monotonic. Dimension %d is not monotonic." % (dim)) + # TODO: Determine partial ordering of features by dominance and + # detect any inconsistencies. + if (weak_dim, dominant_dim) in dim_pairs: + raise ValueError("Cannot have two dominance constraints on the same " + "pair of features conflicting. Features: %d, %d" % + (dominant_dim, weak_dim)) + dim_pairs.add((dominant_dim, weak_dim)) + + if joint_monotonicities is not None: + for i, constraint in enumerate(joint_monotonicities): + if len(constraint) != 2: + raise ValueError("Joint monotonicities constraints must consist of 2 " + "elements. Seeing constraint tuple %s" % (constraint,)) + dim1, dim2 = constraint + if (dim1 >= len(lattice_sizes) or dim2 >= len(lattice_sizes) or + dim1 < 0 or dim2 < 0): + raise ValueError("Dimensions constrained by joint monotonicity " + "constraints are not within the range of the lattice. " + "'dims': %s, %s, num_dims: %s" % + (dim1, dim2, len(lattice_sizes))) + if not isinstance(dim1, int) or not isinstance(dim2, int): + raise ValueError("Joint monotonicity constraint dimensions must be " + "integers. Seeing dimensions %s, %s" % (dim1, dim2)) + + if weights_shape is not None: + if len(weights_shape) != 2: + raise ValueError("Weights must have shape of rank-2. " + "Given: %s" % weights_shape) + expected_num_weights = 1 + for dim_size in lattice_sizes: + expected_num_weights *= dim_size + if weights_shape[0] != expected_num_weights: + raise ValueError("Number of elements in weights does not correspond to " + "lattice sizes. 
+
+  if monotonic_dominances is not None:
+    dim_pairs = set()
+    for i, constraint in enumerate(monotonic_dominances):
+      if len(constraint) != 2:
+        raise ValueError("Monotonic dominance constraints must consist of 2 "
+                         "elements. Seeing constraint tuple %s" % (constraint,))
+      dominant_dim, weak_dim = constraint
+      if not isinstance(dominant_dim, int) or not isinstance(weak_dim, int):
+        raise ValueError("Monotonic dominance constraint dimensions must be "
+                         "integers. Seeing dominant_dim %s and weak_dim %s" %
+                         (dominant_dim, weak_dim))
+      if (dominant_dim >= len(lattice_sizes) or
+          weak_dim >= len(lattice_sizes) or
+          dominant_dim < 0 or weak_dim < 0):
+        raise ValueError("Dimensions constrained by monotonic dominance "
+                         "constraints are not within the range of the lattice. "
+                         "'dims': %s, %s, num_dims: %s" %
+                         (dominant_dim, weak_dim, len(lattice_sizes)))
+      for dim in [dominant_dim, weak_dim]:
+        if monotonicities is None or monotonicities[dim] != 1:
+          raise ValueError("Monotonic dominance constraint's features must be "
+                           "monotonic. Dimension %d is not monotonic." % (dim))
+      # TODO: Determine partial ordering of features by dominance and
+      # detect any inconsistencies.
+      if (weak_dim, dominant_dim) in dim_pairs:
+        raise ValueError("Cannot have two conflicting dominance constraints "
+                         "on the same pair of features. Features: %d, %d" %
+                         (dominant_dim, weak_dim))
+      dim_pairs.add((dominant_dim, weak_dim))
+
+  if joint_monotonicities is not None:
+    for i, constraint in enumerate(joint_monotonicities):
+      if len(constraint) != 2:
+        raise ValueError("Joint monotonicity constraints must consist of 2 "
+                         "elements. Seeing constraint tuple %s" % (constraint,))
+      dim1, dim2 = constraint
+      if not isinstance(dim1, int) or not isinstance(dim2, int):
+        raise ValueError("Joint monotonicity constraint dimensions must be "
+                         "integers. Seeing dimensions %s, %s" % (dim1, dim2))
+      if (dim1 >= len(lattice_sizes) or dim2 >= len(lattice_sizes) or
+          dim1 < 0 or dim2 < 0):
+        raise ValueError("Dimensions constrained by joint monotonicity "
+                         "constraints are not within the range of the lattice. "
+                         "'dims': %s, %s, num_dims: %s" %
+                         (dim1, dim2, len(lattice_sizes)))
+
+  if weights_shape is not None:
+    if len(weights_shape) != 2:
+      raise ValueError("Weights must be a rank-2 tensor. "
+                       "Given shape: %s" % weights_shape)
+    expected_num_weights = 1
+    for dim_size in lattice_sizes:
+      expected_num_weights *= dim_size
+    if weights_shape[0] != expected_num_weights:
+      raise ValueError("Number of elements in weights does not correspond to "
+                       "lattice sizes. Weights shape: %s, lattice sizes: %s, "
+                       "number of elements defined by lattice sizes: %d" %
+                       (weights_shape, lattice_sizes, expected_num_weights))
+
+  if input_shape is not None:
+    if not isinstance(input_shape, list):
+      if input_shape[-1] != len(lattice_sizes):
+        raise ValueError("Last dimension of input shape must have same number "
+                         "of elements as 'lattice_sizes'. 'input shape': %s, "
+                         "'lattice_sizes': %s" % (input_shape, lattice_sizes))
+      shape = input_shape
+    else:
+      if len(input_shape) != len(lattice_sizes):
+        raise ValueError("If lattice input is provided as a list of tensors, "
+                         "their number must match lattice_sizes. "
+                         "'input list': %s, 'lattice_sizes': %s" %
+                         (input_shape, lattice_sizes))
+      shape = input_shape[0]
+    if units is not None:  # Note: this check runs only if input_shape is set.
+      if units > 1 and (len(shape) < 3 or shape[-2] != units):
+        raise ValueError("If 'units' > 1 then input shape of Lattice layer "
+                         "must have rank at least 3, where the second from "
+                         "last dimension is equal to 'units'. 'units': %s, "
+                         "input_shape: %s" % (units, input_shape))
+
+  if output_min is not None and output_max is not None:
+    if output_min >= output_max:
+      raise ValueError("'output_min' must be less than 'output_max'. "
+                       "'output_min': %f, 'output_max': %f" %
+                       (output_min, output_max))
+
+  if regularization_amount and isinstance(regularization_amount, (list, tuple)):
+    if len(regularization_amount) != len(lattice_sizes):
+      raise ValueError(
+          "If %s losses are given per dimension, their number must match the "
+          "number of dimensions defined by lattice sizes. Given: %s, "
+          "lattice sizes: %s" %
+          (regularization_info, regularization_amount, lattice_sizes))
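The weights_shape check above encodes the layer's parameterization: one kernel
parameter per lattice vertex, flattened into the first axis, with one column
per output unit. A small worked example of the expected shape:

    lattice_sizes = [2, 3]          # 2 * 3 = 6 lattice vertices
    units = 4
    expected_kernel_shape = (6, 4)  # (prod(lattice_sizes), units)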
+ """ + if weights.shape[1] > 1: + lattice_sizes = lattice_sizes + [int(weights.shape[1])] + if monotonicities: + monotonicities = monotonicities + [0] + weights = tf.reshape(weights, shape=lattice_sizes) + asserts = [] + + for i in range(len(monotonicities or [])): + if monotonicities[i] != 1: + continue + weights_layers = tf.unstack(weights, axis=i) + + for j in range(1, len(weights_layers)): + diff = tf.reduce_min(weights_layers[j] - weights_layers[j - 1]) + asserts.append( + tf.Assert( + diff >= -eps, + data=[ + "Monotonicity violation", "Feature index:", i, + "Min monotonicity diff:", diff, "Upper layer number:", j, + "Epsilon:", eps, "Layers:", weights_layers[j], + weights_layers[j - 1] + ])) + + for main_dim, cond_dim, cond_direction in edgeworth_trusts or []: + weights_layers = _unstack_2d(weights, main_dim, cond_dim) + for i in range(lattice_sizes[main_dim] - 1): + for j in range(lattice_sizes[cond_dim] - 1): + diff = tf.reduce_min( + cond_direction * + ((weights_layers[i + 1][j + 1] - weights_layers[i][j + 1]) - + (weights_layers[i + 1][j] - weights_layers[i][j]))) + asserts.append( + tf.Assert( + diff >= -eps, + data=[ + "Edgeworth trust violation", "Feature indices:", main_dim, + ",", cond_dim, "Min trust diff:", diff, "Epsilon:", eps, + "Layers:", weights_layers[i + 1][j + 1], + weights_layers[i][j + 1], weights_layers[i + 1][j], + weights_layers[i][j] + ])) + + for main_dim, cond_dim, cond_direction in trapezoid_trusts or []: + weights_layers = _unstack_2d(weights, main_dim, cond_dim) + max_main_dim = lattice_sizes[main_dim] - 1 + for j in range(lattice_sizes[cond_dim] - 1): + lhs_diff = tf.reduce_min( + cond_direction * (weights_layers[0][j] - weights_layers[0][j + 1])) + asserts.append( + tf.Assert( + lhs_diff >= -eps, + data=[ + "Trapezoid trust violation", "Feature indices:", main_dim, + ",", cond_dim, "Min trust diff:", lhs_diff, "Epsilon:", eps, + "Layers:", weights_layers[0][j], weights_layers[0][j + 1] + ])) + rhs_diff = tf.reduce_min(cond_direction * + (weights_layers[max_main_dim][j + 1] - + weights_layers[max_main_dim][j])) + asserts.append( + tf.Assert( + rhs_diff >= -eps, + data=[ + "Trapezoid trust violation", "Feature indices:", main_dim, + ",", cond_dim, "Min trust diff:", rhs_diff, "Epsilon:", eps, + "Layers:", weights_layers[max_main_dim][j + 1], + weights_layers[max_main_dim][j] + ])) + + for dominant_dim, weak_dim in monotonic_dominances or []: + weights_layers = _unstack_2d(weights, dominant_dim, weak_dim) + for i in range(lattice_sizes[dominant_dim] - 1): + for j in range(lattice_sizes[weak_dim] - 1): + midpoint = (weights_layers[i + 1][j + 1] + weights_layers[i][j]) / 2 + dominant_diff = tf.reduce_min(weights_layers[i + 1][j] - midpoint) + asserts.append( + tf.Assert( + dominant_diff >= -eps, + data=[ + "Dominance violation", "Feature indices:", dominant_dim, + ",", weak_dim, "Min dominance diff:", dominant_diff, + "Epsilon:", eps, "Layers:", weights_layers[i][j], + weights_layers[i + 1][j], weights_layers[i + 1][j + 1] + ])) + weak_diff = tf.reduce_min(midpoint - weights_layers[i][j + 1]) + asserts.append( + tf.Assert( + weak_diff >= -eps, + data=[ + "Dominance violation", "Feature indices:", dominant_dim, + ",", weak_dim, "Min dominance diff:", weak_diff, "Epsilon:", + eps, "Layers:", weights_layers[i][j], + weights_layers[i + 1][j], weights_layers[i + 1][j + 1] + ])) + + for dim1, dim2 in joint_monotonicities or []: + weights_layers = _unstack_2d(weights, dim1, dim2) + for i in range(lattice_sizes[dim1] - 1): + for j in range(lattice_sizes[dim2] - 
+
+
+def count_non_zeros(*iterables):
+  """Returns the total number of non-zero elements in the given iterables."""
+  result = 0
+  for iterable in iterables:
+    if iterable is not None:
+      result += [element != 0 for element in iterable].count(True)
+  return result
+
+
+def canonicalize_monotonicities(monotonicities):
+  """Converts string constants representing monotonicities into integers.
+
+  Args:
+    monotonicities: monotonicities hyperparameter of `Lattice` layer.
+
+  Raises:
+    ValueError: If one of the monotonicities is invalid.
+
+  Returns:
+    Monotonicities represented as 0 or 1.
+  """
+  if monotonicities:
+    canonicalized = []
+    for item in monotonicities:
+      if item in [0, 1]:
+        canonicalized.append(item)
+      elif isinstance(item, six.string_types) and item.lower() == "increasing":
+        canonicalized.append(1)
+      elif isinstance(item, six.string_types) and item.lower() == "none":
+        canonicalized.append(0)
+      else:
+        raise ValueError("'monotonicities' elements must be from: [0, 1, "
+                         "'increasing', 'none']. Given: %s" % monotonicities)
+    return canonicalized
+  return None
+
+
+def canonicalize_unimodalities(unimodalities):
+  """Converts string constants representing unimodalities into integers.
+
+  Args:
+    unimodalities: unimodalities hyperparameter of `Lattice` layer.
+
+  Raises:
+    ValueError: If one of the unimodalities is invalid.
+
+  Returns:
+    Unimodalities represented as 0 or 1.
+  """
+  if unimodalities:
+    canonicalized = []
+    for item in unimodalities:
+      if item in [0, 1]:
+        canonicalized.append(item)
+      elif isinstance(item, six.string_types) and item.lower() == "valley":
+        canonicalized.append(1)
+      elif isinstance(item, six.string_types) and item.lower() == "none":
+        canonicalized.append(0)
+      else:
+        raise ValueError("'unimodalities' elements must be from: [0, 1, "
+                         "'valley', 'none']. Given: %s" % unimodalities)
+    return canonicalized
+  return None
+
+
+def canonicalize_trust(trusts):
+  """Converts string constants representing trust direction into integers.
+
+  Args:
+    trusts: edgeworth_trusts or trapezoid_trusts hyperparameter of `Lattice`
+      layer.
+
+  Raises:
+    ValueError: If one of the trust constraints is invalid.
+
+  Returns:
+    Trust constraints with direction represented as -1 or 1.
+  """
+  if trusts:
+    canonicalized = []
+    for item in trusts:
+      if len(item) != 3:
+        raise ValueError("Trust constraints must consist of 3 elements. Seeing "
+                         "constraint tuple %s" % item)
+      direction = item[2]
+      if direction in [-1, 1]:
+        canonicalized.append(item)
+      elif (isinstance(direction, six.string_types) and
+            direction.lower() == "positive"):
+        canonicalized.append((item[0], item[1], 1))
+      elif (isinstance(direction, six.string_types) and
+            direction.lower() == "negative"):
+        canonicalized.append((item[0], item[1], -1))
+      else:
+        raise ValueError("Trust constraint direction must be from: [-1, 1, "
+                         "'negative', 'positive']. Given: %s" % direction)
+    return canonicalized
+  return None
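Concretely, the three canonicalizers above behave as follows (values taken
directly from the constants they accept):

    canonicalize_monotonicities(["increasing", "none", 1])  # -> [1, 0, 1]
    canonicalize_unimodalities(["valley", "none"])          # -> [1, 0]
    canonicalize_trust([(0, 1, "negative")])                # -> [(0, 1, -1)]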
+
+
+def _unstack_2d(tensor, first_dim, second_dim):
+  """Returns list of list of tensors resulting from two unstack operations."""
+  # After the first unstack removes first_dim, every later axis shifts down by
+  # one, hence the adjustment of second_dim.
+  layers = tf.unstack(tensor, axis=first_dim)
+  unstacked_second_dim = (
+      second_dim if second_dim < first_dim else second_dim - 1)
+  return [tf.unstack(layer, axis=unstacked_second_dim) for layer in layers]
+
+
+def _stack_2d(layers, first_dim, second_dim):
+  """Returns tensor that re-stacks tensor layers formed from unstacking."""
+  unstacked_second_dim = (
+      second_dim if second_dim < first_dim else second_dim - 1)
+  layers = [tf.stack(layer, axis=unstacked_second_dim) for layer in layers]
+  return tf.stack(layers, axis=first_dim)
+
+
+def _reverse_second_list_dimension(layers):
+  """Reverses each list within a list of lists, but not the outer list."""
+  return [layer[::-1] for layer in layers]
diff --git a/tensorflow_lattice/python/lattice_test.py b/tensorflow_lattice/python/lattice_test.py
new file mode 100644
index 0000000..b974cd2
--- /dev/null
+++ b/tensorflow_lattice/python/lattice_test.py
@@ -0,0 +1,1472 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +"""Tests for Lattice Layer.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +from absl import logging +from absl.testing import parameterized +import numpy as np +import tensorflow as tf +from tensorflow import keras +from tensorflow_lattice.python import lattice_layer as ll +from tensorflow_lattice.python import test_utils + + +class LatticeTest(parameterized.TestCase, tf.test.TestCase): + + def setUp(self): + super(LatticeTest, self).setUp() + self.disable_all = False + self.disable_ensembles = False + self.loss_eps = 0.0001 + self.small_eps = 1e-6 + + def _ResetAllBackends(self): + keras.backend.clear_session() + tf.compat.v1.reset_default_graph() + + def _ScatterXUniformly(self, num_points, lattice_sizes): + """Deterministically generates num_point random points within lattice.""" + np.random.seed(41) + x = [] + for _ in range(num_points): + point = [ + np.random.random() * (num_vertices - 1.0) + for num_vertices in lattice_sizes + ] + x.append(np.asarray(point)) + if len(lattice_sizes) == 1: + x.sort() + return x + + def _ScatterXUniformlyExtendedRange(self, num_points, lattice_sizes): + """Extends every dimension by 1.0 on both sides and generates points.""" + np.random.seed(41) + x = [] + for _ in range(num_points): + point = [ + np.random.random() * (num_vertices + 1.0) - 1.0 + for num_vertices in lattice_sizes + ] + x.append(np.asarray(point)) + if len(lattice_sizes) == 1: + x.sort() + return x + + def _SameValueForAllDims(self, num_points, lattice_sizes): + """Generates random point with same value for every dimension.""" + if lattice_sizes.count(lattice_sizes[0]) != len(lattice_sizes): + raise ValueError("All dimensions must be of same size. " + "They are: {}".format(lattice_sizes)) + np.random.seed(41) + x = [] + for _ in range(num_points): + rand = np.random.random() * (lattice_sizes[0] - 1.0) + point = [rand] * len(lattice_sizes) + x.append(np.asarray(point)) + if len(lattice_sizes) == 1: + x.sort() + return x + + def _TwoDMeshGrid(self, num_points, lattice_sizes): + """Mesh grid for visualisation of 3-d surfaces via pyplot.""" + if len(lattice_sizes) != 2: + raise ValueError("2-d mesh grid is possible only for 2-d lattice. Lattice" + " sizes given: %s" % lattice_sizes) + return test_utils.two_dim_mesh_grid( + num_points=num_points, + x_min=0.0, + y_min=0.0, + x_max=lattice_sizes[0] - 1.0, + y_max=lattice_sizes[1] - 1.0) + + def _TwoDMeshGridExtendedRange(self, num_points, lattice_sizes): + """Mesh grid extended by 1.0 on every side.""" + if len(lattice_sizes) != 2: + raise ValueError("2-d mesh grid is possible only for 2-d lattice. 
Lattice" + " sizes given: %s" % lattice_sizes) + return test_utils.two_dim_mesh_grid( + num_points=num_points, + x_min=-1.0, + y_min=-1.0, + x_max=lattice_sizes[0], + y_max=lattice_sizes[1]) + + def _Sin(self, x): + return math.sin(x[0]) + + def _SinPlusX(self, x): + return math.sin(x[0]) + x[0] / 3.0 + + def _SinPlusLargeX(self, x): + return math.sin(x[0]) + x[0] + + def _SinPlusXNd(self, x): + res = 0.0 + for y in x: + res = res + math.sin(y) + y / 5.0 + return res + + def _SinOfSum(self, x): + return math.sin(sum(x)) + + def _Square(self, x): + return x[0]**2 + + def _Max(self, x): + return np.amax(x) + + def _WeightedSum(self, x): + result = 0.0 + for i in range(len(x)): + result += (i + 1.0) * x[i] + return result + + def _MixedSignWeightedSum(self, x): + result = 0.0 + for i in range(len(x)): + sign = (i % 2) * -2 + 1 + result += sign * (i + 1.0) * x[i] + return result + + def _PseudoLinear(self, x): + result = 0.0 + for i in range(len(x)): + result += 2 * x[i] + for j in range(len(x)): + if i != j: + result += x[i] * x[j] + return result + + def _ScaledSum(self, x): + result = 0.0 + for y in x: + result += y / len(x) + return result + + def _GetMultiOutputInitializer(self, weights): + """Tiles given weights along 'units' dimension.""" + def Initializer(shape, dtype): + return tf.tile(tf.constant(weights, shape=[len(weights), 1], dtype=dtype), + multiples=[1, shape[1]]) + return Initializer + + def _GetTrainingInputsAndLabels(self, config): + """Generates training inputs and labels. + + Args: + config: Dictionary with config for this unit test. + + Returns: + Tuple `(training_inputs, training_labels, raw_training_inputs)` where + `training_inputs` and `training_labels` are data for training and + `raw_training_inputs` are representation of training_inputs for + visualisation. + """ + raw_training_inputs = config["x_generator"]( + num_points=config["num_training_records"], + lattice_sizes=config["lattice_sizes"]) + + if isinstance(raw_training_inputs, tuple): + # This means that raw inputs are 2-d mesh grid. Convert them into list of + # 2-d points. 
+
+  def _GetMultiOutputInitializer(self, weights):
+    """Tiles given weights along the 'units' dimension."""
+
+    def Initializer(shape, dtype):
+      return tf.tile(
+          tf.constant(weights, shape=[len(weights), 1], dtype=dtype),
+          multiples=[1, shape[1]])
+
+    return Initializer
+
+  def _GetTrainingInputsAndLabels(self, config):
+    """Generates training inputs and labels.
+
+    Args:
+      config: Dictionary with config for this unit test.
+
+    Returns:
+      Tuple `(training_inputs, training_labels, raw_training_inputs)` where
+      `training_inputs` and `training_labels` are data for training and
+      `raw_training_inputs` is the representation of `training_inputs` used
+      for visualisation.
+    """
+    raw_training_inputs = config["x_generator"](
+        num_points=config["num_training_records"],
+        lattice_sizes=config["lattice_sizes"])
+
+    if isinstance(raw_training_inputs, tuple):
+      # This means that the raw inputs are a 2-d mesh grid. Convert them into
+      # a list of 2-d points.
+      training_inputs = list(np.dstack(raw_training_inputs).reshape((-1, 2)))
+    else:
+      training_inputs = raw_training_inputs
+
+    training_labels = [config["y_function"](x) for x in training_inputs]
+    return training_inputs, training_labels, raw_training_inputs
+
+  def _SetDefaults(self, config):
+    config.setdefault("monotonicities", None)
+    config.setdefault("unimodalities", None)
+    config.setdefault("edgeworth_trusts", None)
+    config.setdefault("trapezoid_trusts", None)
+    config.setdefault("monotonic_dominances", None)
+    config.setdefault("joint_monotonicities", None)
+    config.setdefault("output_min", None)
+    config.setdefault("output_max", None)
+    config.setdefault("signal_name", "TEST")
+    config.setdefault("kernel_initializer", "linear_initializer")
+    config.setdefault("num_projection_iterations", 10)
+    config.setdefault("monotonic_at_every_step", True)
+    config.setdefault("target_monotonicity_diff", 0.0)
+    config.setdefault("kernel_regularizer", None)
+    config.setdefault("units", 1)
+    config.setdefault("lattice_index", 0)
+
+    return config
+
+  def _TestEnsemble(self, config):
+    """Verifies that 'units > 1' lattice produces same output as 'units==1'."""
+    if self.disable_ensembles:
+      return
+    config = dict(config)
+    config["num_training_epoch"] = 3
+    losses = []
+    for units, lattice_index in [(1, 0), (3, 0), (3, 2)]:
+      config["units"] = units
+      config["lattice_index"] = lattice_index
+      losses.append(self._TrainModel(config))
+    self.assertAlmostEqual(min(losses), max(losses), delta=self.loss_eps)
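The ensemble check above relies on `_TrainModel` (next) feeding every unit an
identical copy of each example, so all `units` lattices see the same data and
should reach the same loss. The replication step, isolated for clarity:

    x = np.array([0.3, 0.7])                            # one example, 2 features
    tiled = np.tile(np.expand_dims(x, axis=0), [3, 1])  # shape (3, 2): one
                                                        # identical row per unit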
+
+  def _TrainModel(self, config, plot_path=None):
+    logging.info("Testing config:")
+    logging.info(config)
+    config = self._SetDefaults(config)
+    self._ResetAllBackends()
+
+    training_inputs, training_labels, raw_training_inputs = (
+        self._GetTrainingInputsAndLabels(config))
+
+    units = config["units"]
+    lattice_sizes = config["lattice_sizes"]
+    if units > 1:
+      # In order to test a multi-unit lattice, replicate inputs 'units' times
+      # and later use just one out of 'units' outputs to ensure that a
+      # multi-unit lattice trains exactly like a single-unit one.
+      training_inputs = [
+          np.tile(np.expand_dims(x, axis=0), reps=[units, 1])
+          for x in training_inputs
+      ]
+      input_shape = (units, len(lattice_sizes))
+    else:
+      input_shape = (len(lattice_sizes),)
+
+    keras_layer = ll.Lattice(
+        lattice_sizes=lattice_sizes,
+        units=units,
+        monotonicities=config["monotonicities"],
+        unimodalities=config["unimodalities"],
+        edgeworth_trusts=config["edgeworth_trusts"],
+        trapezoid_trusts=config["trapezoid_trusts"],
+        monotonic_dominances=config["monotonic_dominances"],
+        joint_monotonicities=config["joint_monotonicities"],
+        output_min=config["output_min"],
+        output_max=config["output_max"],
+        num_projection_iterations=config["num_projection_iterations"],
+        monotonic_at_every_step=config["monotonic_at_every_step"],
+        kernel_initializer=config["kernel_initializer"],
+        kernel_regularizer=config["kernel_regularizer"],
+        input_shape=input_shape,
+        dtype=tf.float32)
+    model = keras.models.Sequential()
+    model.add(keras_layer)
+
+    if units > 1:
+      lattice_index = config["lattice_index"]
+      model.add(keras.layers.Lambda(
+          lambda x: x[:, lattice_index:lattice_index + 1]))
+
+    optimizer = config["optimizer"](learning_rate=config["learning_rate"])
+    model.compile(loss=keras.losses.mean_squared_error, optimizer=optimizer)
+
+    training_data = (training_inputs, training_labels, raw_training_inputs)
+    loss = test_utils.run_training_loop(
+        config=config,
+        training_data=training_data,
+        keras_model=model,
+        plot_path=plot_path)
+
+    if tf.executing_eagerly():
+      tf.print("final weights: ", keras_layer.kernel)
+    assertion_ops = keras_layer.assert_constraints(
+        eps=-config["target_monotonicity_diff"])
+    if not tf.executing_eagerly() and assertion_ops:
+      tf.compat.v1.keras.backend.get_session().run(assertion_ops)
+
+    return loss
+
+  def testMonotonicityOneD(self):
+    if self.disable_all:
+      return
+    config = {
+        "lattice_sizes": [20],
+        "num_training_records": 128,
+        "num_training_epoch": 50,
+        "optimizer": tf.keras.optimizers.Adagrad,
+        "learning_rate": 1.0,
+        "x_generator": self._ScatterXUniformly,
+        "y_function": self._SinPlusX,
+        "monotonicities": [1],
+        "output_min": 0.0,
+        "output_max": 7.0,
+    }  # pyformat: disable
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, 0.110467, delta=self.loss_eps)
+    self._TestEnsemble(config)
+
+    config = {
+        "lattice_sizes": [20],
+        "num_training_records": 100,
+        "num_training_epoch": 50,
+        "optimizer": tf.keras.optimizers.Adagrad,
+        "learning_rate": 1.0,
+        "x_generator": self._ScatterXUniformly,
+        "y_function": lambda x: -self._SinPlusX(x),
+        "monotonicities": ["increasing"],
+        "output_min": -7.0,
+        "output_max": 0.0,
+    }  # pyformat: disable
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, 2.889168, delta=self.loss_eps)
+    self._TestEnsemble(config)
+
+    config = {
+        "lattice_sizes": [5],
+        "num_training_records": 100,
+        "num_training_epoch": 50,
+        "optimizer": tf.keras.optimizers.Adagrad,
+        "learning_rate": 1.0,
+        "x_generator": self._ScatterXUniformly,
+        "y_function": self._SinPlusLargeX,
+        "monotonicities": [1],
+        "output_min": 0.0,
+        "output_max": 6.0,
+        # Target function is strictly increasing.
+ "target_monotonicity_diff": 0.02, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.000781, delta=self.loss_eps) + + def testMonotonicityTwoD(self): + if self.disable_all: + return + config = { + "lattice_sizes": [21, 6], + "num_training_records": 900, + "num_training_epoch": 100, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._TwoDMeshGrid, + "y_function": self._SinPlusXNd, + "monotonicities": [1, 1], + "output_min": 0.0, + "output_max": 7.0, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.443284, delta=self.loss_eps) + self._TestEnsemble(config) + + config = { + "lattice_sizes": [6, 21], + "num_training_records": 900, + "num_training_epoch": 100, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._TwoDMeshGrid, + "y_function": self._SinPlusXNd, + "monotonicities": [1, 1], + "output_min": 0.0, + "output_max": 7.0, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.443284, delta=self.loss_eps) + self._TestEnsemble(config) + + config = { + "lattice_sizes": [6, 21], + "num_training_records": 900, + "num_training_epoch": 100, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._TwoDMeshGrid, + "y_function": self._SinPlusXNd, + "monotonicities": ["none", "increasing"], + "output_min": 0.0, + "output_max": 7.0, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.202527, delta=self.loss_eps) + self._TestEnsemble(config) + + config = { + "lattice_sizes": [6, 21], + "num_training_records": 900, + "num_training_epoch": 100, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.5, + "x_generator": self._TwoDMeshGrid, + "y_function": self._SinPlusXNd, + "monotonicities": [1, 0], + "output_min": 0.0, + "output_max": 7.0, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.244739, delta=self.loss_eps) + self._TestEnsemble(config) + + config = { + "lattice_sizes": [2, 2], + "num_training_records": 100, + "num_training_epoch": 20, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._TwoDMeshGrid, + "y_function": lambda x: -self._ScaledSum(x), + "monotonicities": [1, 1], + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.051462, delta=self.loss_eps) + self._TestEnsemble(config) + + def testMonotonicity5d(self): + if self.disable_all: + return + config = { + "lattice_sizes": [2, 2, 2, 2, 2], + "num_training_records": 100, + "num_training_epoch": 200, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._ScatterXUniformly, + "y_function": self._ScaledSum, + "monotonicities": [1, 1, 1, 1, 1], + "kernel_initializer": keras.initializers.Constant(value=0.5), + # Function is strictly increasing everywhere, so request monotonicity + # diff to be strictly positive. 
+ "target_monotonicity_diff": 0.08, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.000002, delta=self.loss_eps) + + config = { + "lattice_sizes": [2, 2, 2, 2, 2], + "num_training_records": 100, + "num_training_epoch": 40, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._ScatterXUniformly, + "y_function": lambda x: -self._ScaledSum(x), + "monotonicities": [1, 1, 1, 1, 1], + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.014971, delta=self.loss_eps) + self._TestEnsemble(config) + + config = { + "lattice_sizes": [3, 3, 3, 3], + "num_training_records": 100, + "num_training_epoch": 100, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._ScatterXUniformly, + "y_function": self._SinOfSum, + "monotonicities": [1, "increasing", 1, 1], + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.358079, delta=self.loss_eps) + self._TestEnsemble(config) + + @parameterized.parameters( + ([0, 1, 1],), + ([1, 0, 1],), + ([1, 1, 0],), + ) + def testMonotonicityEquivalence(self, monotonicities): + if self.disable_all: + return + config = { + "lattice_sizes": [3, 3, 3], + "monotonicities": monotonicities, + "num_training_records": 100, + "num_training_epoch": 50, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 10.0, + "x_generator": self._SameValueForAllDims, + "y_function": self._SinOfSum, + "kernel_initializer": "zeros", + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.000286, delta=self.loss_eps) + self._TestEnsemble(config) + + def testMonotonicity10dAlmostMonotone(self): + if self.disable_all: + return + np.random.seed(4411) + num_weights = 1024 + weights = [1.0 * i / num_weights for i in range(num_weights)] + for _ in range(10): + i = int(np.random.random() * num_weights) + weights[i] = 0.0 + + config = { + "lattice_sizes": [2] * 10, + "num_training_records": 1000, + "num_training_epoch": 100, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 100.0, + "x_generator": self._ScatterXUniformly, + "y_function": test_utils.get_hypercube_interpolation_fn(weights), + "monotonicities": [1] * 10, + "kernel_initializer": "zeros", + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.000027, delta=self.loss_eps) + + config["monotonicities"] = [0, 1, 0, 1, 1, 0, 1, 1, 1, 0] + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.000019, delta=self.loss_eps) + self._TestEnsemble(config) + + def testMonotonicity10dSinOfSum(self): + if self.disable_all: + return + config = { + "lattice_sizes": [2] * 10, + "num_training_records": 1000, + "num_training_epoch": 100, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 100.0, + "x_generator": self._ScatterXUniformly, + "y_function": self._SinOfSum, + "monotonicities": [1] * 10, + "output_min": -1.0, + "output_max": 1.0, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.089950, delta=self.loss_eps) + + config["monotonicities"] = [0, 1, 0, 1, 1, 0, 1, 1, 1, 0] + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.078830, delta=self.loss_eps) + + config["monotonicities"] = [0, 0, 0, 1, 0, 1, 0, 0, 0, 0] + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.052190, delta=self.loss_eps) + self._TestEnsemble(config) + + @parameterized.parameters( + ([(0, 1, 1)], [], 0.025785), + (None, [(0, 
1, 1)], 0.042566), + ([(0, 1, "positive")], [(0, 1, "positive")], 0.042566), + ) + def testSimpleTrustTwoD(self, edgeworth_trusts, trapezoid_trusts, + expected_loss): + if self.disable_all: + return + config = { + "lattice_sizes": [2, 2], + "num_training_records": 100, + "num_training_epoch": 20, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._TwoDMeshGrid, + "y_function": self._Max, + "monotonicities": [1, 0], + "edgeworth_trusts": edgeworth_trusts, + "trapezoid_trusts": trapezoid_trusts, + "output_min": 0.0, + "output_max": 1.0, + # Leave margin of error (floating point) for trust projection. + "target_monotonicity_diff": -1e-6, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self.loss_eps) + self._TestEnsemble(config) + + @parameterized.parameters( + ([(1, 0, -1)], None, 3.23711), + (None, [(1, 0, -1)], 6.663453), + ([(1, 0, "negative")], [(1, 0, "negative")], 9.846122), + ) + def testDenseTrustTwoD(self, edgeworth_trusts, trapezoid_trusts, + expected_loss): + if self.disable_all: + return + config = { + "lattice_sizes": [4, 3], + "num_training_records": 150, + "num_training_epoch": 20, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 10.0, + "x_generator": self._TwoDMeshGrid, + "y_function": self._PseudoLinear, + "monotonicities": [0, 1], + "edgeworth_trusts": edgeworth_trusts, + "trapezoid_trusts": trapezoid_trusts, + "output_min": 0.0, + "output_max": 22.0, + # Leave margin of error (floating point) for trust projection. + "target_monotonicity_diff": -1e-5, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self.loss_eps) + if not edgeworth_trusts or not trapezoid_trusts: + self._TestEnsemble(config) + + @parameterized.parameters( + ([(0, 1, 1)], None, 0.010525), + (None, [(0, 1, 1)], 0.013343), + ([(0, 1, 1)], [(0, 1, 1)], 0.013343), + ) + def testSimpleTrust4D(self, edgeworth_trusts, trapezoid_trusts, + expected_loss): + if self.disable_all: + return + config = { + "lattice_sizes": [2, 2, 2, 2], + "num_training_records": 100, + "num_training_epoch": 20, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._ScatterXUniformly, + "y_function": self._Max, + "monotonicities": [1, 0, 1, 1], + "edgeworth_trusts": edgeworth_trusts, + "trapezoid_trusts": trapezoid_trusts, + "output_min": 0.0, + "output_max": 1.0, + # Leave margin of error (floating point) for trust projection. + "target_monotonicity_diff": -1e-6, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self.loss_eps) + self._TestEnsemble(config) + + @parameterized.parameters( + ([(0, 1, 1), (3, 1, -1), (3, 2, 1)], None, 0.334325), + (None, [(0, 1, 1), (3, 1, -1), (3, 2, 1)], 0.387444), + ([(0, 1, 1), (3, 1, -1)], [(3, 1, -1), (3, 2, 1)], 0.381514), + ) + def testMultiDenseTrust4D(self, edgeworth_trusts, trapezoid_trusts, + expected_loss): + if self.disable_all: + return + config = { + "lattice_sizes": [3, 3, 3, 3], + "num_training_records": 1000, + "num_training_epoch": 20, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._ScatterXUniformly, + "y_function": self._SinOfSum, + "monotonicities": [1, 0, 0, 1], + "edgeworth_trusts": edgeworth_trusts, + "trapezoid_trusts": trapezoid_trusts, + "output_min": -0.5, + "output_max": 0.9, + # Leave margin of error (floating point) for trust projection. 
+ "target_monotonicity_diff": -1e-6, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self.loss_eps) + if not edgeworth_trusts or not trapezoid_trusts: + self._TestEnsemble(config) + + @parameterized.parameters( + ([(0, 1, 1)],), + ([(1, 2, 1)],), + ([(2, 0, 1)],), + ) + def testEdgeworthTrustEquivalence(self, edgeworth_trusts): + if self.disable_all: + return + config = { + "lattice_sizes": [3, 3, 3], + "monotonicities": [1, 1, 1], + "edgeworth_trusts": edgeworth_trusts, + "num_training_records": 100, + "num_training_epoch": 50, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 10.0, + "x_generator": self._SameValueForAllDims, + "y_function": self._PseudoLinear, + "kernel_initializer": "zeros", + # Leave margin of error (floating point) for trust projection. + "target_monotonicity_diff": -1e-6, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.006912, delta=self.loss_eps) + self._TestEnsemble(config) + + @parameterized.parameters( + (None, 0.00000), + ([(1, 0)], 0.00000), + ([(0, 1)], 0.05092), + ) + def testSimpleMonotonicDominance2D(self, monotonic_dominances, expected_loss): + if self.disable_all: + return + config = { + "lattice_sizes": [2, 2], + "num_training_records": 100, + "num_training_epoch": 20, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._TwoDMeshGrid, + "y_function": self._WeightedSum, + "monotonicities": [1, 1], + "monotonic_dominances": monotonic_dominances, + "output_min": 0.0, + "output_max": 3.0, + # Leave margin of error (floating point) for dominance projection. + "target_monotonicity_diff": -1e-6, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self.loss_eps) + self._TestEnsemble(config) + + @parameterized.parameters( + (None, 0.00113), + ([(1, 0)], 0.00113), + ([(0, 1)], 0.81520), + ) + def testDenseMonotonicDominance2D(self, monotonic_dominances, expected_loss): + if self.disable_all: + return + config = { + "lattice_sizes": [5, 5], + "num_training_records": 100, + "num_training_epoch": 20, + "num_projection_iterations": 20, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._TwoDMeshGrid, + "y_function": self._WeightedSum, + "monotonicities": [1, 1], + "monotonic_dominances": monotonic_dominances, + "output_min": 0.0, + "output_max": 12.0, + # Leave margin of error (floating point) for dominance projection. + "target_monotonicity_diff": -1e-2, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self.loss_eps) + self._TestEnsemble(config) + + @parameterized.parameters( + ([(1, 0), (2, 1)], 2.52985), + ([(0, 1), (1, 2)], 6.16700), + ) + def testDenseMonotonicDominance5D(self, monotonic_dominances, expected_loss): + if self.disable_all: + return + config = { + "lattice_sizes": [5, 5, 5, 5, 5], + "num_training_records": 100, + "num_training_epoch": 300, + "num_projection_iterations": 20, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._ScatterXUniformly, + "y_function": self._WeightedSum, + "monotonicities": [1, 1, 1, 1, 1], + "monotonic_dominances": monotonic_dominances, + "output_min": 0.0, + "output_max": 60.0, + # Leave margin of error (floating point) for dominance projection. 
+ "target_monotonicity_diff": -1e-1, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self.loss_eps) + self._TestEnsemble(config) + + @parameterized.parameters( + (None, 0.00000), + ([(0, 1)], 0.05092), + ([(1, 0)], 0.05092), + ) + def testSimpleJointMonotonicity2D(self, joint_monotonicities, expected_loss): + if self.disable_all: + return + config = { + "lattice_sizes": [2, 2], + "num_training_records": 100, + "num_training_epoch": 20, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._TwoDMeshGrid, + "y_function": self._MixedSignWeightedSum, + "monotonicities": [0, 0], + "joint_monotonicities": joint_monotonicities, + "output_min": -2.0, + "output_max": 1.0, + # Leave margin of error (floating point) for dominance projection. + "target_monotonicity_diff": -1e-6, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self.loss_eps) + self._TestEnsemble(config) + + @parameterized.parameters( + (None, 0.16301), + ([(0, 1)], 0.86386), + ([(1, 0)], 0.86413), + ) + def testDenseJointMonotonicity2D(self, joint_monotonicities, expected_loss): + if self.disable_all: + return + config = { + "lattice_sizes": [5, 5], + "num_training_records": 100, + "num_training_epoch": 40, + "num_projection_iterations": 40, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._TwoDMeshGrid, + "y_function": self._MixedSignWeightedSum, + "monotonicities": [0, 0], + "joint_monotonicities": joint_monotonicities, + "output_min": -8.0, + "output_max": 4.0, + # Leave margin of error (floating point) for dominance projection. + "target_monotonicity_diff": -1e-2, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self.loss_eps) + self._TestEnsemble(config) + + @parameterized.parameters( + ([(0, 1)], 36.75898), + ) + def testDenseJointMonotonicity5D(self, joint_monotonicities, expected_loss): + if self.disable_all: + return + config = { + "lattice_sizes": [5, 5, 5, 5, 5], + "num_training_records": 100, + "num_training_epoch": 100, + "num_projection_iterations": 40, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._ScatterXUniformly, + "y_function": self._MixedSignWeightedSum, + "monotonicities": [0, 0, 0, 0, 0], + "joint_monotonicities": joint_monotonicities, + "output_min": -24.0, + "output_max": 36.0, + # Leave margin of error (floating point) for dominance projection. 
+ "target_monotonicity_diff": -1e-1, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self.loss_eps) + self._TestEnsemble(config) + + @parameterized.parameters( + # Custom TFL initializer: + ("linear_initializer", 0.126068), + # Standard Keras initializer: + (keras.initializers.Constant(value=1.5), 0.430379), + # Standard Keras initializer specified as string constant: + ("zeros", 1.488072), + ) + def testInitializerType(self, initializer, expected_loss): + if self.disable_all: + return + config = { + "lattice_sizes": [2, 3], + "num_training_records": 98, + "num_training_epoch": 0, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._TwoDMeshGrid, + "y_function": self._Max, + "output_min": 0.0, + "output_max": 2.0, + "kernel_initializer": initializer, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self.loss_eps) + self._TestEnsemble(config) + + def _MergeDicts(self, x, y): + z = dict(x) + z.update(y) + return z + + def testLinearMonotonicInitializer(self): + if self.disable_all: + return + # Test initializer by training linear function using 0 iteration and verify + # that loss is 0. + config = { + "num_training_records": 96, + "num_training_epoch": 0, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._TwoDMeshGrid, + } # pyformat: disable + + init_config = { + "lattice_sizes": [3, 4], + "monotonicities": [0, 0], + "output_min": -1.0, + "output_max": 2.0, + } + config["kernel_initializer"] = "LinearInitializer" + config["y_function"] = test_utils.get_linear_lattice_interpolation_fn( + **init_config) + total_config = self._MergeDicts(config, init_config) + loss = self._TrainModel(total_config) + self.assertAlmostEqual(loss, 0.0, delta=self.small_eps) + self._TestEnsemble(total_config) + + # Change generator since we need more than 2 dimensions from now on. + config["x_generator"] = self._ScatterXUniformly + + init_config = { + "lattice_sizes": [2, 3, 4, 5], + "monotonicities": [1, 1, 0, 1], + "output_min": 12.0, + "output_max": 22.0, + } + config["kernel_initializer"] = ll.LinearInitializer(**init_config) + config["y_function"] = test_utils.get_linear_lattice_interpolation_fn( + **init_config) + total_config = self._MergeDicts(config, init_config) + loss = self._TrainModel(total_config) + self.assertAlmostEqual(loss, 0.0, delta=self.small_eps) + self._TestEnsemble(total_config) + + init_config = { + "lattice_sizes": [2, 3, 4, 5], + "monotonicities": [0, 1, 0, 1], + "output_min": -10, + "output_max": -5, + } + config["kernel_initializer"] = ll.LinearInitializer(**init_config) + config["y_function"] = test_utils.get_linear_lattice_interpolation_fn( + **init_config) + total_config = self._MergeDicts(config, init_config) + loss = self._TrainModel(total_config) + self.assertAlmostEqual(loss, 0.0, delta=self.small_eps) + self._TestEnsemble(total_config) + + # Try to fit some other function and see loss >0 to ensure that this test + # does not always returns 0. 
+ config["y_function"] = self._SinOfSum + total_config = self._MergeDicts(config, init_config) + loss = self._TrainModel(total_config) + self.assertGreater(loss, 0.1) + self._TestEnsemble(total_config) + + init_config = { + "lattice_sizes": [2, 3, 4, 5], + "monotonicities": [0, 0, 0, 0], + "output_min": 1.0, + "output_max": 3.0, + } + config["kernel_initializer"] = "linear_initializer" + config["y_function"] = test_utils.get_linear_lattice_interpolation_fn( + **init_config) + total_config = self._MergeDicts(config, init_config) + loss = self._TrainModel(total_config) + self.assertAlmostEqual(loss, 0.0, delta=self.small_eps) + self._TestEnsemble(total_config) + + def testUnimodalInitializer(self): + if self.disable_all: + return + config = { + "lattice_sizes": [3, 4], + "unimodalities": [1, 1], + "kernel_initializer": "linear_initializer", + "num_training_records": 96, + "num_training_epoch": 0, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._TwoDMeshGrid, + "y_function": self._Max, + "output_min": 0.0, + "output_max": 2.0, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 1.292362, delta=self.loss_eps) + self._TestEnsemble(config) + + config["unimodalities"] = ["valley", "none"] + config["monotonicities"] = ["none", "increasing"] + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.794330, delta=self.loss_eps) + self._TestEnsemble(config) + + def testAssertMonotonicity(self): + if self.disable_all: + return + # Specify non monotonic initializer and do 0 training iterations so no + # projections are being executed. + config = { + "lattice_sizes": [2, 2], + "num_training_records": 100, + "num_training_epoch": 0, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 0.15, + "x_generator": self._TwoDMeshGrid, + "y_function": self._ScaledSum, + "monotonicities": [0, 0], + "kernel_initializer": self._GetMultiOutputInitializer( + weights=[4.0, 3.0, 2.0, 1.0]) + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 4.865740, delta=self.loss_eps) + + for monotonicity in [[0, 1], [1, 0], [1, 1]]: + for units in [1, 3]: + config["monotonicities"] = monotonicity + config["units"] = units + with self.assertRaises(tf.errors.InvalidArgumentError): + self._TrainModel(config) + + def testBounds(self): + if self.disable_all: + return + config = { + "lattice_sizes": [20], + "num_training_records": 100, + "num_training_epoch": 40, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._ScatterXUniformly, + "y_function": self._Sin, + "output_min": -0.6, + "output_max": 0.4, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.109398, delta=self.loss_eps) + self._TestEnsemble(config) + + config = { + "lattice_sizes": [11, 4], + "num_training_records": 270, + "num_training_epoch": 40, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._TwoDMeshGrid, + "y_function": self._SinPlusXNd, + "monotonicities": [1, 1], + "output_min": 1.0, + "output_max": 2.5, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.380813, delta=self.loss_eps) + self._TestEnsemble(config) + + config = { + "lattice_sizes": [2] * 5, + "num_training_records": 100, + "num_training_epoch": 40, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._ScatterXUniformly, + "y_function": self._SinOfSum, + "monotonicities": [1, 1, 0, 
1, 0],
+        "output_min": 0.3,
+        "output_max": 0.7,
+    }  # pyformat: disable
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, 0.145910, delta=self.loss_eps)
+    self._TestEnsemble(config)
+
+  def testInputOutOfBounds(self):
+    if self.disable_all:
+      return
+    config = {
+        "lattice_sizes": [6],
+        "num_training_records": 100,
+        "num_training_epoch": 20,
+        "optimizer": tf.keras.optimizers.Adagrad,
+        "learning_rate": 1.0,
+        "x_generator": self._ScatterXUniformlyExtendedRange,
+        "y_function": self._Sin,
+        "kernel_initializer": keras.initializers.Zeros(),
+    }  # pyformat: disable
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, 0.018727, delta=self.loss_eps)
+    self._TestEnsemble(config)
+
+    config = {
+        "lattice_sizes": [2, 2],
+        "num_training_records": 100,
+        "num_training_epoch": 20,
+        "optimizer": tf.keras.optimizers.Adagrad,
+        "learning_rate": 1.0,
+        "x_generator": self._TwoDMeshGridExtendedRange,
+        "y_function": self._SinOfSum,
+    }  # pyformat: disable
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, 0.130813, delta=self.loss_eps)
+    self._TestEnsemble(config)
+
+  @parameterized.parameters(
+      # Laplacian with l1 and l2:
+      (("laplacian", 0.005, 0.01), 0.03, 0.021399),
+      # Different regularization amount for every dimension:
+      (("laplacian", [0.005, 0.01], [0.01, 0.02]), 0.045, 0.027941),
+      # Torsion with l1 and l2:
+      (("torsion", 0.1, 0.01), 0.11, 0.06738),
+      # Different regularization amount for every dimension:
+      (("torsion", [2.0, 0.05], [0.1, 0.1]), 0.11, 0.06738),
+      # List of regularizers:
+      ([("torsion", 0.1, 0.0), ("Torsion", 0.0, 0.01)], 0.11, 0.06738),
+      # Standard Keras regularizer:
+      (keras.regularizers.l1_l2(l1=0.01, l2=0.1), 0.33, 0.214418),
+  )
+  def testRegularizers2d(self, regularizer, pure_reg_loss, training_loss):
+    if self.disable_all:
+      return
+    weights = [0.0, 1.0, 1.0, 1.0]
+    config = {
+        "lattice_sizes": [2, 2],
+        "num_training_records": 100,
+        "num_training_epoch": 0,
+        "optimizer": tf.keras.optimizers.Adagrad,
+        "learning_rate": 1.0,
+        "x_generator": self._TwoDMeshGrid,
+        "y_function": test_utils.get_hypercube_interpolation_fn(
+            coefficients=weights),
+        "kernel_initializer": self._GetMultiOutputInitializer(weights=weights),
+        "kernel_regularizer": regularizer,
+    }  # pyformat: disable
+    loss = self._TrainModel(config)
+    # This loss is pure regularization loss because the initializer matches
+    # the target function and there were no training epochs.
+ self.assertAlmostEqual(loss, pure_reg_loss, delta=self.loss_eps) + + multioutput_config = dict(config) + units = 3 + multioutput_config["units"] = units + loss = self._TrainModel(multioutput_config) + self.assertAlmostEqual(loss, pure_reg_loss * units, delta=self.loss_eps) + + config["num_training_epoch"] = 20 + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, training_loss, delta=self.loss_eps) + + @parameterized.parameters( + (("torsion", 0.001, 0.0001), 0.147405), + (("laplacian", 0.001, 0.0001), 0.193870), + ) + def testRegularizersLargeLattice(self, regularizer, expected_loss): + if self.disable_all: + return + config = { + "lattice_sizes": [3, 4, 3, 4], + "num_training_records": 100, + "num_training_epoch": 100, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._ScatterXUniformly, + "y_function": self._SinOfSum, + "kernel_regularizer": regularizer, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self.loss_eps) + + def testHighDimensionsStressTest(self): + if self.disable_all: + return + lattice_sizes = [3, 3] + [2] * 14 + monotonicities = [0] * 16 + monotonicities[3], monotonicities[4], monotonicities[10] = (1, 1, 1) + unimodalities = [0] * 16 + unimodalities[1] = 1 + config = { + "lattice_sizes": lattice_sizes, + "units": 2, + "monotonicities": monotonicities, + "unimodalities": unimodalities, + "edgeworth_trusts": [(3, 2, 1)], + "output_min": 0.0, + "output_max": 1.0, + "num_training_records": 100, + "num_training_epoch": 3, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1000.0, + "x_generator": self._ScatterXUniformly, + "y_function": self._SinOfSum, + "kernel_regularizer": [("torsion", 1e-6, 1e-6), + ("laplacian", 1e-5, 1e-5)], + "target_monotonicity_diff": -1e-5, + } # pyformat: disable + loss = self._TrainModel(config) + # TODO: this test behaves differently in graph and eager mode. + # Figure out why. 
+ self.assertAlmostEqual(loss, 3.689727, delta=0.1) + + @parameterized.parameters( + ([0], [0], 0.026734), + ([1], ["none"], 0.195275), + ([1], None, 0.195275), + ([0], ["valley"], 0.045627), + (None, [1], 0.045627), + ) + def testUnimodalityOneD(self, monotonicities, unimodalities, expected_loss): + if self.disable_all: + return + + def WShaped1dFunction(x): + d = min(abs(x[0] - 3.0), abs(x[0] - 7.0)) + return d * d / 4.0 + + config = { + "lattice_sizes": [11], + "num_training_records": 128, + "num_training_epoch": 200, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 0.15, + "x_generator": self._ScatterXUniformly, + "y_function": WShaped1dFunction, + "monotonicities": monotonicities, + "unimodalities": unimodalities, + "kernel_initializer": "linear_initializer", + "output_min": 0.0, + "output_max": 4.0, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self.loss_eps) + self._TestEnsemble(config) + + @parameterized.parameters( + ([0, 0], [0, 0], 0.003822), + ([1, 1], [0, 0], 0.313155), + ([0, 0], [1, 1], 0.003073), + ([1, 0], [0, 1], 0.162484), + ([0, 0], [1, 0], 0.004883), + ) + def testUnimodalityTwoD(self, monotonicities, unimodalities, expected_loss): + if self.disable_all: + return + + def WShaped2dFunction(x): + distance = lambda x1, y1, x2, y2: ((x2 - x1)**2 + (y2 - y1)**2)**0.5 + d = distance(x[0], x[1], 5.0, 5.0) + return (d - 2.0)**2 / 8.0 + + config = { + "lattice_sizes": [11, 11], + "num_training_records": 900, + "num_training_epoch": 50, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.5, + "x_generator": self._TwoDMeshGrid, + "y_function": WShaped2dFunction, + "monotonicities": monotonicities, + "unimodalities": unimodalities, + "kernel_initializer": "linear_initializer", + "output_min": 0.0, + "output_max": 4.0, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self.loss_eps) + self._TestEnsemble(config) + + def testUnconstrained(self): + if self.disable_all: + return + config = { + "lattice_sizes": [20], + "num_training_records": 100, + "num_training_epoch": 200, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 0.15, + "x_generator": self._ScatterXUniformly, + "y_function": self._Sin, + "kernel_initializer": keras.initializers.Zeros, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.000917, delta=self.loss_eps) + self._TestEnsemble(config) + + config = { + "lattice_sizes": [2], + "num_training_records": 100, + "num_training_epoch": 50, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 0.15, + "x_generator": self._ScatterXUniformly, + "y_function": self._Square, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.004277, delta=self.loss_eps) + self._TestEnsemble(config) + + config = { + "lattice_sizes": [2, 2], + "num_training_records": 100, + "num_training_epoch": 200, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 0.15, + "x_generator": self._ScatterXUniformly, + "y_function": test_utils.get_hypercube_interpolation_fn( + coefficients=[0.0, 1.0, 1.0, 1.0]), + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.000003, delta=self.loss_eps) + self._TestEnsemble(config) + + config = { + "lattice_sizes": [2] * 3, + "num_training_records": 100, + "num_training_epoch": 200, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 0.15, + "x_generator": 
self._ScatterXUniformly,
+        "y_function": test_utils.get_hypercube_interpolation_fn(
+            coefficients=[i / 2.0**3 for i in range(2**3)])
+    }  # pyformat: disable
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, 0.000001, delta=self.loss_eps)
+    self._TestEnsemble(config)
+
+    config = {
+        "lattice_sizes": [2] * 5,
+        "num_training_records": 100,
+        "num_training_epoch": 100,
+        "optimizer": tf.keras.optimizers.Adagrad,
+        "learning_rate": 1.5,
+        "x_generator": self._ScatterXUniformly,
+        "y_function": test_utils.get_hypercube_interpolation_fn(
+            coefficients=[i / 2.0**5 for i in range(2**5)])
+    }  # pyformat: disable
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, 0.000008, delta=self.loss_eps)
+    self._TestEnsemble(config)
+
+    config = {
+        "lattice_sizes": [2, 2],
+        "num_training_records": 100,
+        "num_training_epoch": 100,
+        "optimizer": tf.keras.optimizers.Adagrad,
+        "learning_rate": 0.15,
+        "x_generator": self._ScatterXUniformly,
+        "y_function": self._Max,
+    }  # pyformat: disable
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, 0.003599, delta=self.loss_eps)
+    self._TestEnsemble(config)
+
+    config = {
+        "lattice_sizes": [2] * 6,
+        "num_training_records": 100,
+        "num_training_epoch": 300,
+        "optimizer": tf.keras.optimizers.Adagrad,
+        "learning_rate": 30.0,
+        "x_generator": self._ScatterXUniformly,
+        "y_function": self._PseudoLinear,
+    }  # pyformat: disable
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, 0.000118, delta=self.loss_eps)
+    self._TestEnsemble(config)
+
+    config = {
+        "lattice_sizes": [2, 3, 4],
+        "num_training_records": 100,
+        "num_training_epoch": 200,
+        "optimizer": tf.keras.optimizers.Adagrad,
+        "learning_rate": 10.0,
+        "x_generator": self._ScatterXUniformly,
+        "y_function": self._PseudoLinear,
+    }  # pyformat: disable
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, 0.00002, delta=self.loss_eps)
+    self._TestEnsemble(config)
+
+    config = {
+        "lattice_sizes": [4, 5],
+        "num_training_records": 100,
+        "num_training_epoch": 100,
+        "optimizer": tf.keras.optimizers.Adagrad,
+        "learning_rate": 10.0,
+        "x_generator": self._ScatterXUniformly,
+        "y_function": self._WeightedSum,
+    }  # pyformat: disable
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, 0.0, delta=self.loss_eps)
+    self._TestEnsemble(config)
+
+    config = {
+        "lattice_sizes": [2, 3, 4, 5],
+        "num_training_records": 100,
+        "num_training_epoch": 200,
+        "optimizer": tf.keras.optimizers.Adagrad,
+        "learning_rate": 30.0,
+        "x_generator": self._ScatterXUniformly,
+        "y_function": self._Max,
+    }  # pyformat: disable
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, 0.000891, delta=self.loss_eps)
+    self._TestEnsemble(config)
+
+    config = {
+        "lattice_sizes": [2, 3, 4, 5],
+        "num_training_records": 100,
+        "num_training_epoch": 200,
+        "optimizer": tf.keras.optimizers.Adagrad,
+        "learning_rate": 30.0,
+        "x_generator": self._ScatterXUniformly,
+        "y_function": self._WeightedSum,
+    }  # pyformat: disable
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, 0.004216, delta=self.loss_eps)
+    self._TestEnsemble(config)
+
+  @parameterized.parameters(
+      ([2, 3, 4], 6.429155),
+      ([2, 3, 3], 13.390955),
+      ([2, 2, 3], 22.205267),
+      ([2, 2, 3, 3], 5.049051),
+      ([2, 2, 3, 2, 2], 5.3823),
+      ([2, 2, 3, 3, 2, 2], 67.775276),
+      ([2, 2, 2, 3, 3, 3], 156.755035),
+      ([3, 2, 2, 3, 3, 2], 104.419373),
+  )
+  def testEquallySizedDimsOptimization(self, lattice_sizes, expected_loss):
+    if self.disable_all:
+      return
+    config = {
+        "lattice_sizes": 
lattice_sizes, + "num_training_records": 100, + "num_training_epoch": 1, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 10.0, + "x_generator": self._ScatterXUniformly, + "y_function": self._WeightedSum, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self.loss_eps) + + @parameterized.parameters( + ([2, 2, 2, 2, 2, 2], 92), + ([2, 2, 3, 2, 3, 2], 117), + ([2, 2, 2, 2, 3, 3], 102), + ([2, 2, 2, 2, 2, 2, 2, 2, 2], 125), + ([2, 2, 2, 2, 2, 2, 3, 3, 3], 135), + ) + def testGraphSize(self, lattice_sizes, expected_graph_size): + # If this test failed then you modified core lattice interpolation logic in + # a way which increases number of ops in the graph. Or maybe Keras team + # changed something under the hood. Please ensure that this increase is + # unavoidable and try to minimize it. + if self.disable_all: + return + tf.compat.v1.disable_eager_execution() + tf.compat.v1.reset_default_graph() + + layer = ll.Lattice(lattice_sizes=lattice_sizes) + input_tensor = tf.ones(shape=(1, len(lattice_sizes))) + layer(input_tensor) + graph_size = len(tf.compat.v1.get_default_graph().as_graph_def().node) + + self.assertLessEqual(graph_size, expected_graph_size) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensorflow_lattice/python/lib/__init__.py b/tensorflow_lattice/python/lib/__init__.py deleted file mode 100644 index c4daf9f..0000000 --- a/tensorflow_lattice/python/lib/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""TensorFlow Lattice python libraries.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function diff --git a/tensorflow_lattice/python/lib/keypoints_initialization.py b/tensorflow_lattice/python/lib/keypoints_initialization.py deleted file mode 100644 index c1d5100..0000000 --- a/tensorflow_lattice/python/lib/keypoints_initialization.py +++ /dev/null @@ -1,504 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Piecewise linear calibration keypoints initialization functions. - -Piecewise linear calibration requires initialization of its keypoints inputs -and outputs. If these initialization values are known one can use them directly. 
- -But usually these initialization values are calculated in one of two ways: - -1) As a preprocessing step one calculates the quantiles of some input features -(see function save_quantiles_for_keypoints below). Then during training -these quantiles are sub-sampled to the number of keypoints, and these are the -initialization values used (see function load_keypoints_from_quantiles below). -Since the quantiles are independent of the number of keypoints, the quantiles -saved once can be used for training of models with different numbers of -keypoints, so the saved quantiles can be loaded multiple times during -hyperparameter optimization. - -2) The user knows the input range and the number of keypoints. Use the function -uniform_keypoints_for_signal below to calculate evenly spaced initialization -keypoints inputs based on that. - -Notice that in both scenarios it is assumed that the user knows the output range, -and the keypoints outputs are initialized linearly between the min and the max -of the output, so the calibration starts as a fully linear model. - -Notice that the keypoints initialization values are saved, so they are no longer -needed at production (inference) time. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import ast -import os - -# Dependency imports -import numpy as np -import six -import tensorflow as tf - -from tensorflow_lattice.python.lib import tools -from tensorflow import gfile -from tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import - -_QUANTILES_SUBDIRECTORY = "quantiles" - -# The "feature" name for the label. The labels quantiles will be saved -# to a file whose name is based on this name. We assume that there's no -# regular feature with this name. -_LABEL_FEATURE_NAME = "__label__" - - -def _get_size(o): - # Returns the number of elements in o, for o a list, tuple, dict or ndarray. - if isinstance(o, dict): - total = 0 - for v in o.values(): - total += _get_size(v) - return total - if isinstance(o, list) or isinstance(o, tuple) or isinstance(o, np.ndarray): - return len(o) - return 1 - - -def _materialize_locally(tensors, num_steps=1, feed_dict=None, safety_size=1e9): - """Materialize the given tensors locally, during initialization. - - Assumes non-distributed environment (uses SingularMonitoredSession). - - Args: - tensors: tensors to be materialized: array or dict. - num_steps: number of steps to run. Usually it's faster/easier to run in - one step, a large batch. Set it to 0 or None to run until the queue is - exhausted, when an OutOfRangeError exception is raised -- typically when - an input_fn is set to run for a fixed num_epochs. - feed_dict: optional feed_dict. - safety_size: if num_steps is None and one created input_fn to loop - indefinitely (num_epochs=None), this could loop consuming memory. This - is a safety limit on memory to prevent that. Increase this if you actually - need more than this many elements in your results, or set num_steps. - - Returns: - Materialized tensors as array or dict, like `tensors` arg. - - Raises: - ValueError: for negative num_steps. - tf.errors.OutOfRangeError: if can't read num_steps times. - """ - if num_steps and num_steps < 0: - raise ValueError("can not run with num_steps=%s" % num_steps) - - # tf.compat.v1.train.SingularMonitoredSession silently catches - # tf.errors.OutOfRangeError, and we want to expose it.
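# Editor's note (comment added for clarity; not in the original source): the
# `error` bookkeeping below exists because SingularMonitoredSession suppresses
# tf.errors.OutOfRangeError when the `with` block exits, so the code records
# the exception inside the block and re-raises it after the session closes.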
- error = None - with tf.compat.v1.train.SingularMonitoredSession() as sess: - try: - splits = [] - if not num_steps: - # Run until queue exhausted. - try: - count = 0 - while True: - r = sess.run(tensors, feed_dict=feed_dict) - count += _get_size(r) - if count > safety_size: - raise ValueError( - "Unbound (num_steps=None) materialization of " - "input reached safety size of {}".format(safety_size)) - splits.append(r) - except tf.errors.OutOfRangeError: - pass - else: - # Run num_steps times. - splits = [ - sess.run(tensors, feed_dict=feed_dict) for _ in range(num_steps) - ] - if isinstance(splits[0], dict): - materialized = {} - for k in splits[0].keys(): - materialized[k] = np.concatenate([ - splits[i][k] for i in range(len(splits)) - if splits[i][k].size > 0]) - else: - materialized = np.concatenate(splits) - except (tf.errors.OutOfRangeError, StopIteration) as ex: - error = ex - if error: - raise error # pylint: disable=raising-bad-type - return materialized - - -def _path_for_quantile(subdir, feature_name): - # Change slashes to dashes to make quantile filenames valid. - # Note that there is a slight chance of name collision here. - feature_name = str(feature_name).replace("/", "-") - return os.path.join(subdir, "%s.txt" % feature_name) - - -def _save_quantiles(subdir, feature_name, quantiles): - file_io.write_string_to_file( - _path_for_quantile(subdir, str(feature_name)), str(quantiles)) - - -def _load_quantiles(subdir, feature_name): - """Returns False if failed to load.""" - serialized = file_io.read_file_to_string( - _path_for_quantile(subdir, feature_name)) - return ast.literal_eval(serialized) - - -def uniform_keypoints_for_signal(num_keypoints, - input_min, - input_max, - output_min, - output_max, - dtype=tf.float32): - """Returns a pair of initialization tensors for calibration keypoints. - - This is used when the input range to be calibrated is known. - - Args: - num_keypoints: number of keypoints to use for calibrating this signal. - input_min: Scalar with the minimum value that the uncalibrated input can - take. - input_max: Scalar with the maximum value that the uncalibrated input can - take. - output_min: Scalar with the calibrated value associated with input_min. - Typically the minimum expected calibrated value, but not necessarily, - especially if the calibration is decreasing. - output_max: Scalar with the calibrated value associated with input_max. - dtype: If any of the scalars are not given as tensors, they are converted to - tensors with this dtype. - - Returns: - Two tensors to be used as the keypoints_inputs and keypoints_outputs - initialization, uniformly distributed over the given ranges. Dtype is given - by input_min, input_max, output_min, output_max. - - Raises: - ValueError: if underlying types (dtype) don't match.
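Example (editor's sketch, not in the original docstring; the numbers mirror
testUniformKeypointsForSignal in the test file removed later in this patch):

    inputs, outputs = uniform_keypoints_for_signal(
        num_keypoints=5,
        input_min=0.0,
        input_max=1.0,
        output_min=10.0,
        output_max=100.0)
    # inputs  -> [0.0, 0.25, 0.5, 0.75, 1.0]
    # outputs -> [10.0, 32.5, 55.0, 77.5, 100.0]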
- """ - input_min = tools.cast_to_scalar_tensor_of_dtype(input_min, dtype) - input_max = tools.cast_to_scalar_tensor_of_dtype(input_max, dtype) - output_min = tools.cast_to_scalar_tensor_of_dtype(output_min, dtype) - output_max = tools.cast_to_scalar_tensor_of_dtype(output_max, dtype) - types_set = set( - [input_min.dtype, input_max.dtype, output_min.dtype, output_max.dtype]) - if len(types_set) != 1: - raise ValueError("different dtypes for parameters: got %s" % types_set) - return (tf.linspace(input_min, input_max, num_keypoints), - tf.linspace(output_min, output_max, num_keypoints)) - - -def save_quantiles_for_keypoints(input_fn, - save_dir, - feature_columns=None, - num_steps=1, - override=True, - num_quantiles=1000, - dtype=tf.float32): - - """Calculates and saves quantiles for given features and optionally the label. - - These values can later be retrieved and used by keypoints_from_quantiles() - below. - - Repeated values are discarded before the quantiles are calculated. That means - that the quantiles of a very skewed distribution (for instance where 99% - of the values are 0), will be different. But for the purpose of calibration - this approach is more useful. - - Nothing is returned, the values are simply saved in the given location. - - This function can be called as a preprocessing step before actual training - starts. Typically one will run this in a separate process locally, before - starting training for instance. - - Args: - input_fn: Similar to input_fn provided to Estimators. Typically one - doesn't need to go over the full data to get good quantiles. Typically - some 100 random examples per quantile is good enough for the purpose of - calibration. If you don't have too much data, just use everything. - If input_fn returns a label, the label quantiles will be saved into a - file named _LABEL_FEATURE_NAME in '/quantiles' directory and - they can be used to initialize the keypoint outputs by passing True to - the 'use_label_quantiles_for_outputs' in - load_keypoints_from_quantiles(). - save_dir: Where to save these quantiles. Since when optimizing - hyperparameters we train various models, we can share the quantiles - information generated here. So this should be a directory that can be - accessed by all training sessions. A subdirectory called "quantiles" will - be created, and inside one file per feature is created: named after the - feature name, and with the quantiles stored in JSON format. - feature_columns: If set, quantiles are generated for these feature columns. - The file name used to save the quantiles uses a hash of the names of the - feature_columns, so it can support different quantiles sets for different - parts of the model if needed. If not set quantiles will be generated for - all features returned by input_fn. - num_steps: number of steps to take over input_fn to gather enough data to - create quantiles. Set to 0 or None to run until queue is exhausted, - like if you used num_epochs in your input_fn. - override: if False it won't regenerate quantiles for files that are already - there. This works as long as the features definition/distribution hasn't - change from one run to another. - num_quantiles: This value should be larger than the maximum number of - keypoints that will be considered for calibrating these features. If - there are not enough quantiles for the keypoints, the system is robust and - will simply interpolate the missing quantiles. 
Similarly, if there are not - enough examples to represent the quantiles, it will interpolate the - quantiles from the examples given. - dtype: Default dtype to use, in particular for categorical values. - - Returns: Nothing, results are saved to disk. - - Raises: - tf.errors.OpError: For I/O errors. - - FutureWork: - * Use the Munro-Paterson algorithm to calculate quantiles in a streaming - fashion. See Squawd library. - * Add support for weighted examples. - * Handle cases where there are not enough different values in quantiles. - """ - subdir = os.path.join(save_dir, _QUANTILES_SUBDIRECTORY) - file_io.recursive_create_dir(subdir) - with tf.Graph().as_default(): - tensor_to_feature = _compute_tensor_to_feature_dict( - input_fn, feature_columns, dtype) - if override: - tensor_to_saved_feature = tensor_to_feature - else: - tensor_to_saved_feature = { - name: tensor - for (name, tensor) in six.iteritems(tensor_to_feature) - if not gfile.Exists(_path_for_quantile(subdir, name))} - materialized_tensors = _materialize_locally( - tensor_to_saved_feature, num_steps) - - percentiles = np.linspace(0., 100., num_quantiles) - for key, values in six.iteritems(materialized_tensors): - values = np.unique(values) - quantiles = np.percentile(values, percentiles, interpolation="nearest") - quantiles = list(quantiles) - _save_quantiles(subdir, key, quantiles) - - -def _compute_tensor_to_feature_dict(input_fn, feature_columns, dtype): - """Computes a feature_name-to-tensor dict for the given features. - - Args: - input_fn: See the same argument in 'save_quantiles_for_keypoints'. - feature_columns: See the same argument in 'save_quantiles_for_keypoints'. - dtype: See the same argument in 'save_quantiles_for_keypoints'. - - Returns: - A str->tensor dict mapping each feature name to the tensor containing its - feature values for the current batch. The dict contains all the features - returned by input_fn if feature_columns is None, or only those features - included in 'feature_columns' otherwise. If a non-None label is returned by - 'input_fn', it will also be included in the dict. - """ - if feature_columns is not None: - transformed_columns_to_tensors, label = input_fn() - features_to_tensors = { - f_col.name: tools.input_from_feature_column( - transformed_columns_to_tensors, f_col, dtype) - for f_col in feature_columns - } - else: - features_to_tensors, label = input_fn() - if label is None: - return features_to_tensors - if _LABEL_FEATURE_NAME in features_to_tensors: - raise ValueError( - ("Can't save a label as there's already a feature named: '%s'." - " Try renaming that feature. ") % _LABEL_FEATURE_NAME) - features_to_tensors[_LABEL_FEATURE_NAME] = label - return features_to_tensors - - -def save_quantiles_for_keypoints_once( - input_fn, save_dir, is_chief, timeout_secs=600, **kwargs): - """Concurrency-safe version of save_quantiles_for_keypoints. - - If is_chief is True and the quantiles do not already exist in 'save_dir', - calls save_quantiles_for_keypoints; otherwise waits for up to timeout_secs - seconds for the quantiles to be created and returns. Raises - tools.SaveOrWaitTimeOutError if the timeout expires before the quantiles have - been created. - - In multi-process TensorFlow training, one must ensure that - save_quantiles_for_keypoints is called by a single process before any process - calls load_keypoints_from_quantiles. This function facilitates this by making - the chief worker save the quantiles and all the other processes wait for the - quantiles to be created.
Simply call this function in each process before - the process calls load_keypoints_from_quantiles. - - Note that for a given 'save_dir', the quantiles will only be created on the - first execution of the program. Successive executions will not overwrite the - quantiles. To recreate the quantiles, the save_dir directory must be deleted. - - Args: - input_fn: Passed to save_quantiles_for_keypoints. - save_dir: Passed to save_quantiles_for_keypoints. - is_chief: bool. Whether the caller is the chief. - timeout_secs: int. The amount of time in seconds to wait for the chief. - **kwargs: Other keyword arguments to be passed to - save_quantiles_for_keypoints. - """ - def write_fn(): - save_quantiles_for_keypoints(input_fn, save_dir, **kwargs) - tools.save_once_or_wait_for_chief( - write_fn=write_fn, - metadata_dir=save_dir, - is_chief=is_chief, - timeout_secs=timeout_secs) - - -def load_keypoints_from_quantiles(feature_names, - save_dir, - num_keypoints, - output_min=None, - output_max=None, - use_label_quantiles_for_outputs=False, - reversed_dict=None, - missing_input_values_dict=None, - dtype=tf.float32): - """Retrieves keypoints initialization values for selected features. - - It expects that the quantiles have already been calculated and saved in the - save_dir by the save_quantiles_for_keypoints function. It will raise - an I/O error if not. - - Args: - feature_names: List of feature names for which to get keypoints - initialization values. - save_dir: Directory where the quantiles have been saved to. Same value used - when save_quantiles_for_keypoints was called. - num_keypoints: Desired number of keypoints to use for calibration. This can - either be a scalar to be used for all features, or a dict mapping feature - name to num_keypoints. Fewer keypoints than requested can end up being - used when the given feature does not have enough distinct values. If - num_keypoints for a feature is missing, None or 0, no initialization is - generated. - output_min: If not None, specifies the initial calibrated value associated - with the first calibration keypoint. The keypoints outputs in between will - be linearly interpolated. It can be given as a scalar, in which case the - value is used for all features, or a dict mapping feature name to - output_min. - output_max: Like output_min, but the calibrated value associated with the - last keypoint. Scalar or dict. - use_label_quantiles_for_outputs: Sets the keypoint outputs (calibrated - values) to the label quantiles. If this parameter is true, then output_min - and output_max must both be None and the label quantiles must have been - saved in the call to save_quantiles_for_keypoints that generated the - quantile files (i.e. the input_fn parameter for the latter function must - have returned a label). If this parameter is False, then neither - output_min nor output_max may be None. - reversed_dict: An optional dict. If reversed_dict[feature_name] is True, - then the initial output keypoints will be in reversed order for that - feature, i.e., input_min will be mapped to output_max or the last label - quantile if use_label_quantiles_for_outputs is true, and input_max will be - mapped to output_min or the first label quantile if - use_label_quantiles_for_outputs is true. Reversing output keypoints is - useful for decreasing monotonic calibrators. - missing_input_values_dict: An optional dict. If provided, it should include - all features passed via feature_names.
If the value of - missing_input_values_dict[feature_name] is not None, it is excluded from the - input keypoint values. - dtype: Type to be used for calibration. - - Returns: - Dict of feature name to pair of constant tensors that can be used to - initialize calibrator keypoints inputs and outputs. - - Raises: - tf.errors.NotFoundError: if quantiles file not found. - """ - if (output_min is None) != (output_max is None): - raise ValueError( - "Either both output_min and output_max should be given or neither.") - - output_labels_given = (output_min is not None) - if (use_label_quantiles_for_outputs and output_labels_given): - raise ValueError( - "If use_label_quantiles_for_outputs is true, then" - " output_min and output_max cannot be given.") - if (not use_label_quantiles_for_outputs and not output_labels_given): - raise ValueError( - "Either use_label_quantiles_for_outputs should be true or " - " output_min and output_max must be given.") - - subdir = os.path.join(save_dir, _QUANTILES_SUBDIRECTORY) - num_keypoints = tools.cast_to_dict(num_keypoints, feature_names, - num_keypoints) - if use_label_quantiles_for_outputs: - label_quantiles = _load_quantiles(subdir, _LABEL_FEATURE_NAME) - else: - label_quantiles = None - output_min = tools.cast_to_dict_of_tensor_scalars(output_min, feature_names, - dtype, "output_min") - output_max = tools.cast_to_dict_of_tensor_scalars(output_max, feature_names, - dtype, "output_max") - keypoints = {} - for feature_name in feature_names: - if feature_name not in num_keypoints or not num_keypoints[feature_name]: - continue - all_quantiles = _load_quantiles(subdir, feature_name) - if (missing_input_values_dict is not None and - feature_name in missing_input_values_dict): - exclude_val = missing_input_values_dict[feature_name] - if exclude_val is not None: - all_quantiles = [q for q in all_quantiles if q != exclude_val] - quantiles = _resample_quantiles(all_quantiles, num_keypoints[feature_name]) - unique_quantiles = sorted(set(quantiles)) - input_keypoints = tf.constant( - unique_quantiles, shape=[len(unique_quantiles)], dtype=dtype) - if use_label_quantiles_for_outputs: - output_keypoints = tf.constant( - _resample_quantiles(label_quantiles, len(unique_quantiles)), - shape=[len(unique_quantiles)], - dtype=dtype) - else: - output_keypoints = tf.linspace(output_min[feature_name], - output_max[feature_name], - len(unique_quantiles)) - if reversed_dict is not None and reversed_dict[feature_name]: - output_keypoints = tf.reverse(output_keypoints, axis=[0]) - keypoints[feature_name] = (input_keypoints, output_keypoints) - return keypoints - - -def _resample_quantiles(quantiles, new_size): - """Computes new-size-quantiles on the given array of quantiles. - - This is roughly equivalent to computing new-size-quantiles on the - original data from which 'quantiles' was created. - - Args: - quantiles: list. The original quantiles. - new_size: int. The number of quantiles to generate. - Returns: - A list of the new quantiles.
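Example (editor's sketch, not in the original docstring, showing the numpy
call this helper reduces to): resampling the stored quantiles
[0, 25, 50, 75, 100] to new_size=3 evaluates

    np.percentile([0, 25, 50, 75, 100],
                  np.linspace(0., 100., 3),  # i.e. [0., 50., 100.]
                  interpolation="nearest")

which returns [0, 50, 100].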
- """ - percentiles = np.linspace(0., 100., new_size) - return np.percentile(quantiles, percentiles, interpolation="nearest") diff --git a/tensorflow_lattice/python/lib/keypoints_initialization_test.py b/tensorflow_lattice/python/lib/keypoints_initialization_test.py deleted file mode 100644 index db61575..0000000 --- a/tensorflow_lattice/python/lib/keypoints_initialization_test.py +++ /dev/null @@ -1,480 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for TensorFlow Lattice's keypoints_initialization module.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math -import os - -from absl.testing import parameterized -import numpy as np -import tensorflow as tf - -from tensorflow_lattice.python.lib import keypoints_initialization - - -class KeypointsInitializationTestCase(tf.test.TestCase, parameterized.TestCase): - - def testMaterializeLocally(self): - num_examples = 100 - x = np.random.uniform(0.0, 1.0, size=num_examples) - - # Read to the end of a number of epochs. - input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x={'x': x}, batch_size=13, num_epochs=1, shuffle=False) - results = keypoints_initialization._materialize_locally( - tensors=input_fn(), num_steps=None) - self.assertLen(results['x'], num_examples) - input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x={'x': x}, batch_size=13, num_epochs=2, shuffle=False) - results = keypoints_initialization._materialize_locally( - tensors=input_fn(), num_steps=None) - self.assertLen(results['x'], 2 * num_examples) - - # Read a certain number of steps: just enough to read all data (last - # batch will only be partially fulfilled). - input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x={'x': x}, batch_size=13, num_epochs=1, shuffle=False) - results = keypoints_initialization._materialize_locally( - tensors=input_fn(), num_steps=1) - self.assertLen(results['x'], 13) - - input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x={'x': x}, batch_size=13, num_epochs=1, shuffle=False) - results = keypoints_initialization._materialize_locally( - tensors=input_fn(), num_steps=8) - self.assertLen(results['x'], num_examples) - - # Try to read beyond end of input, with num_steps set. - input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x={'x': x}, batch_size=13, num_epochs=1, shuffle=False) - with self.assertRaises(tf.errors.OutOfRangeError): - results = keypoints_initialization._materialize_locally( - tensors=input_fn(), num_steps=100) - - # Try to read beyond safety limit. 
- input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x={'x': x}, batch_size=13, num_epochs=None, shuffle=False) - with self.assertRaises(ValueError): - results = keypoints_initialization._materialize_locally( - tensors=input_fn(), num_steps=None, safety_size=1000) - - def _BuildInputs(self, x0, x1, x2, label=None): - """Returns input_fn, feature_names and feature_columns.""" - - def _input_fn(): - features = { - 'x0': tf.constant(x0, dtype=tf.float32), - 'x1': tf.constant(x1, dtype=tf.float32), - 'x2': tf.constant(x2, dtype=tf.float32), - } - if label is None: - return features, None - return features, tf.constant(label, dtype=tf.float32) - - feature_names = ['x0', 'x1', 'x2'] - feature_columns = set( - [tf.feature_column.numeric_column(key=fn) for fn in feature_names]) - return _input_fn, feature_names, feature_columns - - def _CheckSaveQuantilesForKeypoints(self, name, num_examples, num_steps, x0, - x1, x2, use_feature_columns, override): - input_fn, feature_names, feature_columns = self._BuildInputs(x0, x1, x2) - save_dir = os.path.join(self.get_temp_dir(), name) - keypoints_initialization.save_quantiles_for_keypoints( - input_fn, - save_dir, - feature_columns=(feature_columns if use_feature_columns else None), - num_quantiles=5, - override=override) - - # Check by reading files directly. - subdir = os.path.join(save_dir, - keypoints_initialization._QUANTILES_SUBDIRECTORY) - quantiles_x0 = keypoints_initialization._load_quantiles(subdir, 'x0') - quantiles_x1 = keypoints_initialization._load_quantiles(subdir, 'x1') - quantiles_x2 = keypoints_initialization._load_quantiles(subdir, 'x2') - self.assertAllClose( - quantiles_x0, [0, 2.5**2, 5.**2, 7.5**2, 100.], atol=0.2) - self.assertAllClose( - quantiles_x1, - [1., math.pow(10., 0.5), 10.0, - math.pow(10., 1.5), 100.], - atol=0.2) - # x2 should start with [0,0,...] and end in [..., 1, 1], the middle value - # can be either 0 or 1. - self.assertAllClose(quantiles_x2[0:2], [0., 0.], atol=1e-3) - self.assertAllClose(quantiles_x2[-2:], [1., 1.], atol=1e-3) - - # New graph is needed because default graph is changed by save - # keypoints, and self.session() will by default try to reuse a cached - # session, with a different graph. - with tf.Graph().as_default() as g: - # Check by using load_keypoints_from_quantiles. - keypoints_init = keypoints_initialization.load_keypoints_from_quantiles( - feature_names, - save_dir, - 3, - output_min={ - 'x0': 0., - 'x1': 1., - 'x2': 7. - }, - output_max={ - 'x0': 1., - 'x1': 10., - 'x2': 13. - }) - with self.session(graph=g) as sess: - keypoints_init = sess.run(keypoints_init) - self.assertAllClose(keypoints_init['x0'][0], [0, 5.**2, 100.], atol=0.2) - self.assertAllClose(keypoints_init['x0'][1], [0., 0.5, 1.]) - self.assertAllClose(keypoints_init['x1'][0], [1., 10.0, 100.], atol=0.2) - self.assertAllClose(keypoints_init['x1'][1], [1., 5.5, 10.]) - - # Notice x2 only has 2 unique values, so it should have lowered the - # num_keypoints to 2. - self.assertAllClose([0., 1.0], keypoints_init['x2'][0], atol=1e-3) - self.assertAllClose([7., 13.0], keypoints_init['x2'][1], atol=1e-3) - - # Check that load_keypoints_from_quantiles doesn't generate anything - # if num_keypoints is 0 or unset. - with tf.Graph().as_default() as g: - # Check by using load_keypoints_from_quantiles. - keypoints_init = keypoints_initialization.load_keypoints_from_quantiles( - feature_names, - save_dir, { - 'x0': 3, - 'x2': 3, - 'x1': 0 - }, - output_min={ - 'x0': 0., - 'x1': 1., - 'x2': 7.
- }, - output_max={ - 'x0': 1., - 'x1': 10., - 'x2': 13. - }) - with self.session(graph=g) as sess: - keypoints_init = sess.run(keypoints_init) - self.assertIn('x0', keypoints_init) - self.assertIn('x2', keypoints_init) - self.assertNotIn('x1', keypoints_init) - - def testSaveQuantilesForKeypoints(self): - """Tests quantiles are being calculated correctly.""" - num_examples = 100000 - num_steps = num_examples / num_examples - - # Verify for randomized input: try with/without feature_columns. - x0 = np.random.uniform(0.0, 10.0, size=num_examples) - x0 = np.square(x0) - x1 = np.random.uniform(0.0, 2.0, size=num_examples) - x1 = np.power(10., x1) - x2 = np.random.randint(0, 2, size=num_examples).astype(float) - self._CheckSaveQuantilesForKeypoints( - 'save_quantiles_for_keypoints', - num_examples, - num_steps, - x0, - x1, - x2, - use_feature_columns=True, - override=True) - self._CheckSaveQuantilesForKeypoints( - 'save_quantiles_for_keypoints', - num_examples, - num_steps, - x0, - x1, - x2, - use_feature_columns=False, - override=False) - - # Second change: since we are not overriding, it shouldn't regenerate the - # results. So we provide "wrong data": if the quantiles are regenerated - # the test will fail. - x0 = np.linspace(0.0, 1.0, num_examples) - x1 = np.linspace(0.0, 1.0, num_examples) - x2 = np.array([2.] * num_examples) - self._CheckSaveQuantilesForKeypoints( - 'save_quantiles_for_keypoints', - num_examples, - num_steps, - x0, - x1, - x2, - use_feature_columns=False, - override=False) - - # Verify that things work on a non-randomized set: this will break - # if not all input is being considered. - x0 = np.linspace(0.0, 10.0, num_examples) - x0 = np.square(x0) - x1 = np.linspace(0.0, 2.0, num_examples) - x1 = np.power(10., x1) - x2 = np.array([0.] * int(num_examples / 2) + [1.] 
* int(num_examples / 2)) - self._CheckSaveQuantilesForKeypoints( - 'save_quantiles_for_keypoints', - num_examples, - num_steps, - x0, - x1, - x2, - use_feature_columns=False, - override=True) - - def testSaveQuantilesForKeypointsSavingLabelQuantiles(self): - input_fn, unused_feature_names, unused_feature_columns = self._BuildInputs( - x0=[0], x1=[0], x2=[0], label=np.random.uniform(0.0, 100.0, 100000)) - save_dir = os.path.join(self.get_temp_dir(), - 'save_quantiles_for_keypoints_saving_labels') - keypoints_initialization.save_quantiles_for_keypoints( - input_fn, save_dir, override=True, num_quantiles=5) - subdir = os.path.join(save_dir, - keypoints_initialization._QUANTILES_SUBDIRECTORY) - quantiles = keypoints_initialization._load_quantiles( - subdir, keypoints_initialization._LABEL_FEATURE_NAME) - self.assertAllClose( - np.linspace(0, 100.0, 5), - quantiles, - atol=0.2, - msg=('quantiles saved by save_quantiles_for_keypoints() do not match' - ' expected quantiles')) - - def testLoadKeypointsFromQuantilesLoadingLabelQuantiles(self): - input_fn, unused_feature_names, unused_feature_columns = self._BuildInputs( - x0=np.random.uniform(0.0, 2.0, 100000), - x1=[0], - x2=[0], - label=np.random.uniform(0.0, 100.0, 100000)) - save_dir = os.path.join( - self.get_temp_dir(), - 'load_keypoints_from_quantiles_loading_label_quantiles') - keypoints_initialization.save_quantiles_for_keypoints( - input_fn, save_dir, override=True) - with tf.Graph().as_default() as g, self.session(graph=g) as session: - result = keypoints_initialization.load_keypoints_from_quantiles( - feature_names=['x0'], - save_dir=save_dir, - num_keypoints=5, - use_label_quantiles_for_outputs=True) - result = session.run(result) - self.assertAllClose( - { - 'x0': [ - np.array([0.0, 0.5, 1.0, 1.5, 2.0]), - np.array([0.0, 25.0, 50.0, 75.0, 100.0]) - ] - }, - result, - atol=0.2, - msg='load_keypoints_from_quantiles didn\'t produce expected labels') - - @parameterized.named_parameters( - { - 'testcase_name': 'both_output_and_label_quantiles', - 'msg': ('Expected an exception when both output_min, output_max are ' - 'given and use_label_quantiles_for_outputs is True'), - 'use_label_quantiles_for_outputs': True, - 'output_min': 0.0, - 'output_max': 1.0 - }, { - 'testcase_name': 'output_min_and_not_output_max', - 'msg': - ('Expected an exception when output_min is given and output_max' - ' isn\'t'), - 'use_label_quantiles_for_outputs': True, - 'output_min': 0.0, - 'output_max': None - }, { - 'testcase_name': 'output_max_and_not_output_min', - 'msg': - ('Expected an exception when output_max is given and output_min' - ' isn\'t'), - 'use_label_quantiles_for_outputs': True, - 'output_min': None, - 'output_max': 1.0 - }, { - 'testcase_name': 'neither_output_nor_label_quantiles', - 'msg': - ('Expected an exception when output_min, output_max are not given' - ' and use_label_quantiles_for_outputs is False'), - 'use_label_quantiles_for_outputs': False, - 'output_min': None, - 'output_max': None - }) - def testLoadKeypointsFromQuantilesRaises(self, - use_label_quantiles_for_outputs, - output_min, output_max, msg): - input_fn, unused_feature_names, unused_feature_columns = self._BuildInputs( - x0=np.random.uniform(0.0, 2.0, 100000), - x1=[0], - x2=[0], - label=np.random.uniform(0.0, 100.0, 100000)) - save_dir = os.path.join( - self.get_temp_dir(), - 'load_keypoints_from_quantiles_loading_label_quantiles') - keypoints_initialization.save_quantiles_for_keypoints( - input_fn, save_dir, override=True) - with self.assertRaises(ValueError, msg=msg):
keypoints_initialization.load_keypoints_from_quantiles( - use_label_quantiles_for_outputs=use_label_quantiles_for_outputs, - output_min=output_min, - output_max=output_max, - feature_names=['x0'], - save_dir=save_dir, - num_keypoints=5) - - def testQuantileInitWithReversedDict(self): - num_examples = 100 - x0 = np.linspace(0.0, 10.0, num_examples) - x1 = np.linspace(0.0, 10.0, num_examples) - x2 = np.linspace(0.0, 1.0, num_examples) - - input_fn, feature_names, feature_columns = self._BuildInputs(x0, x1, x2) - save_dir = os.path.join(self.get_temp_dir(), 'reversed_dict') - keypoints_initialization.save_quantiles_for_keypoints( - input_fn, - save_dir, - feature_columns=feature_columns, - num_quantiles=100, - override=True) - reversed_dict = {'x0': False, 'x1': True, 'x2': False} - - with tf.Graph().as_default() as g: - # Check by using load_keypoints_from_quantiles. - keypoints_init = keypoints_initialization.load_keypoints_from_quantiles( - feature_names, - save_dir, - num_keypoints=3, - output_min={ - 'x0': 0., - 'x1': 0., - 'x2': 0. - }, - output_max={ - 'x0': 1., - 'x1': 1., - 'x2': 1. - }, - reversed_dict=reversed_dict) - with self.session(graph=g) as sess: - keypoints_init = sess.run(keypoints_init) - - self.assertAllClose(keypoints_init['x0'][0], [0.0, 5.0, 10.0], atol=0.1) - self.assertAllClose(keypoints_init['x0'][1], [0.0, 0.5, 1.0], atol=0.01) - self.assertAllClose(keypoints_init['x1'][0], [0.0, 5.0, 10.0], atol=0.1) - self.assertAllClose(keypoints_init['x1'][1], [1.0, 0.5, 0.0], atol=0.01) - self.assertAllClose(keypoints_init['x2'][0], [0.0, 0.5, 1.0], atol=0.01) - self.assertAllClose(keypoints_init['x2'][1], [0.0, 0.5, 1.0], atol=0.01) - - def testQuantileInitWithMissingInputValuesDict(self): - num_examples = 10 - x0 = np.linspace(-1.0, 1.0, num_examples) - x1 = np.linspace(0.0, 1.0, num_examples) - x2 = np.linspace(0.0, 1.0, num_examples) - - input_fn, feature_names, feature_columns = self._BuildInputs(x0, x1, x2) - save_dir = os.path.join(self.get_temp_dir(), 'exclude_input_values_dict') - keypoints_initialization.save_quantiles_for_keypoints( - input_fn, - save_dir, - feature_columns=feature_columns, - num_quantiles=num_examples, - override=True) - - with tf.Graph().as_default() as g: - # Check by using load_keypoints_from_quantiles. - keypoints_init = keypoints_initialization.load_keypoints_from_quantiles( - feature_names, - save_dir, - num_keypoints=3, - output_min={ - 'x0': 0., - 'x1': 0., - 'x2': 0. - }, - output_max={ - 'x0': 1., - 'x1': 1., - 'x2': 1. - }, - missing_input_values_dict={ - 'x0': -1.0, - 'x1': 0.0, - 'x2': None - }, - ) - with self.session(graph=g) as sess: - keypoints_init = sess.run(keypoints_init) - - self.assertAllClose(keypoints_init['x0'][0], [-0.778, 0.111, 1.0], atol=0.1) - self.assertAllClose(keypoints_init['x0'][1], [0.0, 0.5, 1.0], atol=0.01) - self.assertAllClose(keypoints_init['x1'][0], [0.111, 0.556, 1.0], atol=0.1) - self.assertAllClose(keypoints_init['x1'][1], [0.0, 0.5, 1.0], atol=0.01) - self.assertAllClose(keypoints_init['x2'][0], [0.0, 0.444, 1.0], atol=0.01) - self.assertAllClose(keypoints_init['x2'][1], [0.0, 0.5, 1.0], atol=0.01) - - def testUniformKeypointsForSignal(self): - # New graph is needed because default graph is changed by save - # keypoints, and self.session() will by default try to reuse a cached - # session, with a different graph. 
- with tf.Graph().as_default() as g: - keypoints_init = keypoints_initialization.uniform_keypoints_for_signal( - num_keypoints=5, - input_min=tf.constant(0.0, dtype=tf.float64), - input_max=tf.constant(1.0, dtype=tf.float64), - output_min=10, - output_max=100, - dtype=tf.float64) - self.assertEqual(keypoints_init[0].dtype, tf.float64) - self.assertEqual(keypoints_init[1].dtype, tf.float64) - with self.session(graph=g) as sess: - keypoints_init = sess.run(keypoints_init) - self.assertAllClose(keypoints_init[0], [0., 0.25, 0.5, 0.75, 1.]) - self.assertAllClose(keypoints_init[1], [10., 32.5, 55., 77.5, 100.]) - - def testSaveQuantilesForKeypointsOnce(self): - """Verifies that save_quantiles_for_keypoints_once doesn't raise exceptions. - - We don't test anything else here since save_quantiles_for_keypoints_once - is a thin wrapper around save_once_or_wait_for_chief which is already - tested. - """ - num_examples = 10 - x0 = np.linspace(-1.0, 1.0, num_examples) - x1 = np.linspace(0.0, 1.0, num_examples) - x2 = np.linspace(0.0, 1.0, num_examples) - - input_fn, _, feature_columns = self._BuildInputs(x0, x1, x2) - save_dir = os.path.join(self.get_temp_dir(), 'exclude_input_values_dict') - keypoints_initialization.save_quantiles_for_keypoints_once( - input_fn, - save_dir, - is_chief=True, - feature_columns=feature_columns, - num_quantiles=num_examples, - override=True) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_lattice/python/lib/lattice_layers.py b/tensorflow_lattice/python/lib/lattice_layers.py deleted file mode 100644 index 5525324..0000000 --- a/tensorflow_lattice/python/lib/lattice_layers.py +++ /dev/null @@ -1,404 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Lattice layers library for TensorFlow Lattice. - -Lattice is an interpolated lookup table (LUT), part of TensorFlow Lattice -models. - -This module provides functions used when building models, as opposed to the -basic operators exported by lattice_ops.py -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import functools - -import tensorflow as tf - -from tensorflow_lattice.python.lib import regularizers -from tensorflow_lattice.python.lib import tools -from tensorflow_lattice.python.ops import lattice_ops -from tensorflow_lattice.python.ops.gen_monotone_lattice import monotone_lattice - -_VALID_INTERPOLATION_TYPES = ['hypercube', 'simplex'] - - -def lattice_param_as_linear(lattice_sizes, output_dim, linear_weights=1.0): - """Returns lattice parameter that represents a normalized linear function. - - For simplicity, let's assume output_dim == 1 (when output_dim > 1 you get - output_dim lattices, one for each linear function).
This function returns a - lattice parameter so that - - lattice_param' * phi(x) = 1 / len(lattice_sizes) * - (sum_k x[k] * linear_weights[k]/(lattice_sizes[k] - 1) + bias) - - where phi(x) is the lattice interpolation weight and - bias = -sum_k linear_weights[k] / 2. - - The normalization in the weights and the bias term make the output lie in the - range [-0.5, 0.5], when every member of linear_weights is 1.0. - In addition, the bias term makes the expected value zero when x[k] is from the - uniform distribution over [0, lattice_sizes[k] - 1]. - - The returned lattice_param can be used to initialize a lattice layer as a - linear function. - - Args: - lattice_sizes: (list of ints) A list of lattice sizes of each dimension. - output_dim: (int) number of outputs. - linear_weights: (float, list of floats, list of list of floats) linear - function's weight terms. linear_weights[k][n] == kth output's nth weight. - If float, then all the weights use one value as [[linear_weights] * - len(lattice_sizes)] * output_dim. If a list of floats, then - len(linear_weights) == len(lattice_sizes) is expected, and the weights are - [linear_weights] * output_dim, i.e., all output dimensions will get the - same linear_weights. - - Returns: - List of list of floats with size (output_dim, number_of_lattice_param). - Raises: - ValueError: * Any element in lattice_sizes is less than 2. - * lattice_sizes is empty. - * If linear_weights is not a supported type, or the shape of linear_weights - is not as expected. - """ - if not lattice_sizes: - raise ValueError('lattice_sizes should not be empty') - for lattice_size in lattice_sizes: - if lattice_size < 2: - raise ValueError('All elements in lattice_sizes are expected to be greater ' - 'than or equal to 2, but got %s' % lattice_sizes) - - lattice_rank = len(lattice_sizes) - linear_weight_matrix = None - if isinstance(linear_weights, float): - linear_weight_matrix = [[linear_weights] * lattice_rank] * output_dim - elif isinstance(linear_weights, list): - # Branching using the first element in linear_weights. linear_weights[0] - # should exist, since lattice_sizes is not empty. - if isinstance(linear_weights[0], float): - if len(linear_weights) != lattice_rank: - raise ValueError( - 'The number of elements in linear_weights (%d) != lattice rank (%d)' % - (len(linear_weights), lattice_rank)) - # Repeating the same weights for all output_dim. - linear_weight_matrix = [linear_weights] * output_dim - elif isinstance(linear_weights[0], list): - # 2d matrix case. - if len(linear_weights) != output_dim: - raise ValueError( - 'The number of lists in linear_weights (%d) != output_dim (%d)' % - (len(linear_weights), output_dim)) - for linear_weight in linear_weights: - if len(linear_weight) != lattice_rank: - raise ValueError( - 'linear_weights contain more than one list whose length != ' - 'lattice rank(%d)' % lattice_rank) - linear_weight_matrix = linear_weights - else: - raise ValueError( - 'Only list of float or list of list of floats are supported') - else: - raise ValueError( - 'Only float or list of float or list of list of floats are supported.') - - # Create lattice structure to enumerate (index, lattice_dim) pairs. - lattice_structure = tools.LatticeStructure(lattice_sizes) - - # Normalize linear_weight_matrix.
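# Editor's worked example (comments added; not in the original source),
# following the math in the docstring for lattice_sizes=[2, 2], output_dim=1,
# linear_weights=[1.0, 1.0]: sum_of_weights = (1 + 1) / (2 * 2) = 0.5, so each
# of the four vertices starts at -0.5, and vertex (v0, v1) then adds
# v0 / 2 + v1 / 2, yielding parameters [-0.5, 0.0, 0.0, 0.5] -- exactly the
# value asserted by testTwoByTwoOneOutput in the test file removed below.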
- lattice_parameters = [] - for linear_weight_per_output in linear_weight_matrix: - sum_of_weights = 0.0 - for weight in linear_weight_per_output: - sum_of_weights += weight - sum_of_weights /= (2.0 * lattice_rank) - lattice_parameter = [-sum_of_weights] * lattice_structure.num_vertices - for (idx, vertex) in tools.lattice_indices_generator(lattice_structure): - for dim in range(lattice_rank): - lattice_parameter[idx] += ( - linear_weight_per_output[dim] * float(vertex[dim]) / - float(lattice_rank * (lattice_sizes[dim] - 1))) - lattice_parameters.append(lattice_parameter) - - return lattice_parameters - - -def lattice_param_as_linear_monotonic(lattice_sizes, - output_dim, - is_monotone=True, - output_min=-0.5, - output_max=0.5): - """Returns lattice parameter that represents a monotonic linear function. - - The returned lattice_param can be used to initialize a lattice layer as a - linear function. The linear coefficients are such that the function is - uniformly increasing in the specified monotonic features and has a 0 - coefficient in the others, spanning the given range. - - Args: - lattice_sizes: (list of ints) A list of lattice sizes of each dimension. - output_dim: (int) number of outputs. - is_monotone: (bool or list of bools) monotonicity constraint for each of the - input dimensions. The output forms a linear function that is monotonic in - the specified dimensions and has 0 coefficients for non-monotonic - dimensions. All-false monotonicity is not supported. - output_min: (float) The value of the linear function when all inputs are at - minimum. - output_max: (float) The value of the linear function when all inputs are at - maximum. - - Returns: - List of list of floats with size (output_dim, number_of_lattice_param). - Raises: - ValueError: * Any element in lattice_sizes is less than 2. - * lattice_sizes is empty. - * If no feature is monotonic. - """ - # Find linear_weights that sum to len(lattice_sizes), such that all - # non-monotonic inputs have 0 coefficients and all monotonic inputs have the - # same coefficients. - if isinstance(is_monotone, bool): - is_monotone = [is_monotone] * len(lattice_sizes) - is_monotone_float = [float(m) for m in is_monotone] - n_monotone_dims = sum(is_monotone_float) - if n_monotone_dims == 0: - raise ValueError( - 'At least one feature for the lattice parameters linear initialization ' - 'needs to be monotonic') - linear_weights = [ - m * len(lattice_sizes) / n_monotone_dims for m in is_monotone_float - ] - - # With linear_weights that sum to len(lattice_sizes), calling - # lattice_param_as_linear will return a linear function in the range - # [-0.5, 0.5]. Shift and scale the parameters to have the range: - # (output_min, output_max). - lattice_initializer = lattice_param_as_linear(lattice_sizes, output_dim, - linear_weights) - lattice_initializer = [[ - output_min + (v + 0.5) * (output_max - output_min) for v in l - ] for l in lattice_initializer] - return lattice_initializer - - -def lattice_layer(input_tensor, - lattice_sizes, - is_monotone=None, - output_min=None, - output_max=None, - output_dim=1, - interpolation_type='hypercube', - lattice_initializer=None, - **regularizer_amounts): - """Creates a lattice layer. - - Returns the lattice output, lattice parameters, and projection ops. - - Args: - input_tensor: [batch_size, input_dim] tensor. - lattice_sizes: A list of lattice sizes of each dimension. - is_monotone: A list of input_dim booleans, a boolean, or None. If None or - False, the lattice will not have monotonicity constraints.
If is_monotone[k] - == True, then the lattice output is non-decreasing with respect - to input_tensor[?, k] (the kth coordinate). If True, all input - coordinates will have non-decreasing monotonicity. - output_min: Optional output lower bound. - output_max: Optional output upper bound. - output_dim: Number of outputs. - interpolation_type: 'hypercube' or 'simplex'. - lattice_initializer: (Optional) Initializer for lattice parameter vectors, a - 2D tensor [output_dim, parameter_dim] (where parameter_dim == - lattice_sizes[0] * ... * lattice_sizes[input_dim - 1]). If None, - lattice_param_as_linear initializer will be used with linear_weights=[1] * - len(lattice_sizes). - **regularizer_amounts: Keyword args of regularization amounts passed to - regularizers.lattice_regularization(). Keyword names should be among - regularizers.LATTICE_ONE_DIMENSIONAL_REGULARIZERS or - regularizers.LATTICE_MULTI_DIMENSIONAL_REGULARIZERS. For multi-dimensional - regularizers the value should be float. For one-dimensional regularizers - the values should be float or list of floats. If a single float value is - provided, then all dimensions will get the same value. - - Returns: - A tuple of: - * output tensor of shape [batch_size, output_dim] - * parameter tensor of shape [output_dim, parameter_dim] - * None, or projection ops that must be applied at each - step (or every so many steps) to project the model to a feasible space: - used for bounding the outputs or for imposing monotonicity. - * None, or a regularization loss, if regularization is configured. - - Raises: - ValueError: for invalid parameters. - """ - if interpolation_type not in _VALID_INTERPOLATION_TYPES: - raise ValueError('interpolation_type should be one of {}'.format( - _VALID_INTERPOLATION_TYPES)) - - if lattice_initializer is None: - linear_weights = [1.0] * len(lattice_sizes) - lattice_initializer = lattice_param_as_linear( - lattice_sizes, output_dim, linear_weights=linear_weights) - - parameter_tensor = tf.compat.v1.get_variable( - interpolation_type + '_lattice_parameters', - initializer=lattice_initializer) - - output_tensor = lattice_ops.lattice( - input_tensor, - parameter_tensor, - lattice_sizes, - interpolation_type=interpolation_type) - - with tf.name_scope('lattice_monotonic_projection'): - if is_monotone or output_min is not None or output_max is not None: - projected_parameter_tensor = parameter_tensor - if is_monotone: - is_monotone = tools.cast_to_list(is_monotone, len(lattice_sizes), - 'is_monotone') - projected_parameter_tensor = monotone_lattice( - projected_parameter_tensor, - lattice_sizes=lattice_sizes, - is_monotone=is_monotone) - - if output_min is not None: - projected_parameter_tensor = tf.maximum(projected_parameter_tensor, - output_min) - - if output_max is not None: - projected_parameter_tensor = tf.minimum(projected_parameter_tensor, - output_max) - - delta = projected_parameter_tensor - parameter_tensor - projection_ops = [parameter_tensor.assign_add(delta)] - else: - projection_ops = None - - with tf.name_scope('lattice_regularization'): - reg = regularizers.lattice_regularization(parameter_tensor, lattice_sizes, - **regularizer_amounts) - - return (output_tensor, parameter_tensor, projection_ops, reg) - - -def ensemble_lattices_layer(input_tensor, - lattice_sizes, - structure_indices, - is_monotone=None, - output_dim=1, - interpolation_type='hypercube', - lattice_initializers=None, - **regularizer_amounts): - """Creates an ensemble of lattices layer.
- - Returns a list of lattice outputs, lattice parameters, and projection ops. - - Args: - input_tensor: [batch_size, input_dim] tensor. - lattice_sizes: A list of lattice sizes of each dimension. - structure_indices: A list of lists of ints. structure_indices[k] is a list of - indices that belong to the kth lattice. - is_monotone: A list of input_dim booleans, a boolean, or None. If None or - False, the lattice will not have monotonicity constraints. If is_monotone[k] - == True, then the lattice output is non-decreasing with respect - to input_tensor[?, k] (the kth coordinate). If True, all input - coordinates will have non-decreasing monotonicity. - output_dim: Number of outputs. - interpolation_type: 'hypercube' or 'simplex'. - lattice_initializers: (Optional) A list of initializers, one for each lattice - parameter vector. lattice_initializers[k] is a 2D tensor [output_dim, - parameter_dim[k]], where parameter_dim[k] is the number of parameters in - the kth lattice. If None, lattice_param_as_linear initializer will be used - with linear_weights=[1 if monotone else 0 for monotone in is_monotone]. - **regularizer_amounts: Keyword args of regularization amounts passed to - regularizers.lattice_regularization(). Keyword names should be among - regularizers.LATTICE_ONE_DIMENSIONAL_REGULARIZERS or - regularizers.LATTICE_MULTI_DIMENSIONAL_REGULARIZERS. For multi-dimensional - regularizers the value should be float. For one-dimensional regularizers - the values should be float or list of floats. If a single float value is - provided, then all dimensions will get the same value. - - Returns: - A tuple of: - * a list of output tensors, [batch_size, output_dim], with length - len(structure_indices), i.e., one for each lattice. - * a list of parameter tensors of shape [output_dim, parameter_dim] - * None, or projection ops that must be applied at each - step (or every so many steps) to project the model to a feasible space: - used for bounding the outputs or for imposing monotonicity. - * None, or a regularization loss, if regularization is configured. - """ - num_lattices = len(structure_indices) - lattice_initializers = tools.cast_to_list(lattice_initializers, num_lattices, - 'lattice initializers') - one_dimensional_regularizers = \ - regularizers.LATTICE_ONE_DIMENSIONAL_REGULARIZERS - for regularizer_name in regularizer_amounts: - if regularizer_name in one_dimensional_regularizers: - regularizer_amounts[regularizer_name] = tools.cast_to_list( - regularizer_amounts[regularizer_name], len(lattice_sizes), - regularizer_name) - - # input_slices[k] = input_tensor[:, k]. - input_slices = tf.unstack(input_tensor, axis=1) - - output_tensors = [] - param_tensors = [] - projections = [] - regularization = None - if is_monotone: - is_monotone = tools.cast_to_list(is_monotone, len(lattice_sizes), - 'is_monotone') - # Now iterate through structure_indices to construct lattices.
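# Editor's illustration (comment added; hypothetical values, not in the
# original source): with structure_indices=[[0, 1], [1, 2]], the loop below
# builds two lattices, one over input columns 0 and 1 and one over columns 1
# and 2, each receiving correspondingly sliced lattice_sizes, is_monotone
# flags and per-dimension regularizer amounts.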
- get_indices = lambda indices, iterable: [iterable[index] for index in indices] - for (cnt, structure) in enumerate(structure_indices): - with tf.compat.v1.variable_scope('lattice_%d' % cnt): - sub = functools.partial(get_indices, structure) - sub_lattice_sizes = sub(lattice_sizes) - sub_is_monotone = None - if is_monotone: - sub_is_monotone = sub(is_monotone) - - sub_input_tensor_list = sub(input_slices) - sub_input_tensor = tf.stack(sub_input_tensor_list, axis=1) - - sub_regularizer_amounts = {} - for regularizer_name in regularizer_amounts: - if regularizer_name in one_dimensional_regularizers: - sub_regularizer_amounts[regularizer_name] = sub( - regularizer_amounts[regularizer_name]) - else: - sub_regularizer_amounts[regularizer_name] = regularizer_amounts[ - regularizer_name] - - packed_results = lattice_layer( - sub_input_tensor, - sub_lattice_sizes, - sub_is_monotone, - output_dim=output_dim, - interpolation_type=interpolation_type, - lattice_initializer=lattice_initializers[cnt], - **sub_regularizer_amounts) - (sub_output, sub_param, sub_proj, sub_reg) = packed_results - - output_tensors.append(sub_output) - param_tensors.append(sub_param) - if sub_proj: - projections += sub_proj - regularization = tools.add_if_not_none(regularization, sub_reg) - - return (output_tensors, param_tensors, projections, regularization) diff --git a/tensorflow_lattice/python/lib/lattice_layers_test.py b/tensorflow_lattice/python/lib/lattice_layers_test.py deleted file mode 100644 index bfa609f..0000000 --- a/tensorflow_lattice/python/lib/lattice_layers_test.py +++ /dev/null @@ -1,628 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for TensorFlow Lattice's lattice_layers module.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -from tensorflow_lattice.python.lib import lattice_layers - - -class LatticeParamTestCase(tf.test.TestCase): - - def testTwoByTwoOneOutput(self): - lattice_param = lattice_layers.lattice_param_as_linear( - lattice_sizes=[2, 2], output_dim=1) - self.assertAllClose([[-0.5, 0.0, 0.0, 0.5]], lattice_param) - - def testTwoByTwoTwoOutputs(self): - lattice_param = lattice_layers.lattice_param_as_linear( - lattice_sizes=[2, 2], - output_dim=2, - linear_weights=[[1.0, 1.0], [-0.1, 0.3]]) - self.assertAllClose([[-0.5, 0.0, 0.0, 0.5], [-0.05, -0.1, 0.1, 0.05]], - lattice_param) - - def testTwoByThreeByTwoOneOutput(self): - lattice_param = lattice_layers.lattice_param_as_linear( - lattice_sizes=[2, 3, 2], output_dim=1, linear_weights=[-1.0, 1.0, 1.0]) - self.assertAllClose([[ - -0.1666667, -0.5, 0.0, -0.3333333, 0.1666667, -0.1666667, 0.1666667, - -0.1666667, 0.3333333, 0.0, 0.5, 0.1666667 - ]], lattice_param) - - def testWrongLatticeSizesExpectError(self): - with self.assertRaises(ValueError): - _ = lattice_layers.lattice_param_as_linear( - lattice_sizes=[1, -1], output_dim=1) - - def testEmptyLatticeSizesExpectError(self): - with self.assertRaises(ValueError): - _ = lattice_layers.lattice_param_as_linear(lattice_sizes=[], output_dim=1) - - def testMoreLinearWeightsThanLatticeRankExpectError(self): - with self.assertRaises(ValueError): - _ = lattice_layers.lattice_param_as_linear( - lattice_sizes=[2, 2], output_dim=1, linear_weights=[1, 2, 3]) - - def testLessLinearWeightsThanOutputDimExpectError(self): - with self.assertRaises(ValueError): - _ = lattice_layers.lattice_param_as_linear( - lattice_sizes=[2, 2], output_dim=2, linear_weights=[[1, 2]]) - - def testWrongLinearWeightsExpectError(self): - with self.assertRaises(ValueError): - _ = lattice_layers.lattice_param_as_linear( - lattice_sizes=[2, 2], output_dim=2, linear_weights=[[1], [1, 2]]) - - def testTwoByThreeByTwoOneOutputMonotonic(self): - lattice_param = lattice_layers.lattice_param_as_linear_monotonic( - lattice_sizes=[2, 3, 2], - output_dim=1, - is_monotone=[True, False, True], - output_min=-2.0, - output_max=4.0) - self.assertAllClose( - [[-2.0, 1.0, -2.0, 1.0, -2.0, 1.0, 1.0, 4.0, 1.0, 4.0, 1.0, 4.0]], - lattice_param) - - -class LatticeLayersTestCase(tf.test.TestCase): - - def _testLatticeLayerEvaluation(self, interpolation_type, lattice_sizes, - output_dim, inputs, parameters, - expected_outputs): - """Test evaluation of lattice layers.""" - with tf.Graph().as_default(): - input_tensor = tf.constant(inputs, dtype=tf.float32) - init_param = tf.constant(parameters, dtype=tf.float32) - (output_tensor, _, _, _) = lattice_layers.lattice_layer( - input_tensor, - lattice_sizes=lattice_sizes, - output_dim=output_dim, - interpolation_type=interpolation_type, - lattice_initializer=init_param) - - with self.session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_tensor_values = sess.run(output_tensor) - self.assertAllClose(output_tensor_values, expected_outputs) - - def testWrongInterpolationTypeExpectError(self): - with self.assertRaises(ValueError): - self._testLatticeLayerEvaluation( - interpolation_type='wrong', - output_dim=2, - lattice_sizes=[2, 2], - inputs=[[0.5, 0.5]], - parameters=[[1.0, 2.0], [3.0, 4.0]], - 
expected_outputs=[[2.5]]) - - def testHypercubeEvaluation(self): - inputs = [[-1.0, 0.0], [0.0, 0.0], [0.1, 0.9], [0.3, 1.1], [1.5, 2.0], - [1.6, 3.0]] - parameters = [[0.0, 0.1, 1.1, 2.3, 3.1, 4.2], - [5.1, 2.11, 1.11, 3.21, -1.02, -2.2]] - expected_outputs = [[0.0, 5.1], [0.0, 5.1], [1.099, 1.6681], - [1.657, 1.4286], [4.2, -2.2], [4.2, -2.2]] - self._testLatticeLayerEvaluation( - interpolation_type='hypercube', - output_dim=2, - lattice_sizes=[2, 3], - inputs=inputs, - parameters=parameters, - expected_outputs=expected_outputs) - - def testSimplexEvaluation(self): - inputs = [[-1.0, 0.0], [0.0, 0.0], [0.1, 0.9], [0.3, 1.1], [1.5, 2.0], - [1.6, 3.0]] - parameters = [[0.0, 0.1, 1.1, 2.3, 3.1, 4.2], - [5.1, 2.11, 1.11, 3.21, -1.02, -2.2]] - expected_outputs = [[0.0, 5.1], [0.0, 5.1], [1.11, 1.719], [1.65, 1.199], - [4.2, -2.2], [4.2, -2.2]] - self._testLatticeLayerEvaluation( - interpolation_type='simplex', - output_dim=2, - lattice_sizes=[2, 3], - inputs=inputs, - parameters=parameters, - expected_outputs=expected_outputs) - - def testHypercubeEvaluationWithLinearParam(self): - lattice_sizes = [2, 3] - output_dim = 2 - inputs = [[0.0, 0.0], [0.1, 0.9], [0.3, 1.1], [1.5, 2.0]] - # This parameter works as a linear function - # f(x1, x2) == 1/2 * (x1 + x2) - 0.75 - parameters = lattice_layers.lattice_param_as_linear( - lattice_sizes=lattice_sizes, linear_weights=[1.0, 2.0], output_dim=2) - expected_outputs = [[-0.75, -0.75], [-0.25, -0.25], [-0.05, -0.05], - [0.75, 0.75]] - self._testLatticeLayerEvaluation( - interpolation_type='hypercube', - output_dim=output_dim, - lattice_sizes=lattice_sizes, - inputs=inputs, - parameters=parameters, - expected_outputs=expected_outputs) - - def testSimplexEvaluationWithLinearParam(self): - lattice_sizes = [2, 3] - output_dim = 2 - inputs = [[0.0, 0.0], [0.1, 0.9], [0.3, 1.1], [1.5, 2.0]] - # This parameter works as linear functions - # f(x1, x2) = [0.5 * (x1 + x2) - 0.75, x1 + x2 - 1.5] - parameters = lattice_layers.lattice_param_as_linear( - lattice_sizes=lattice_sizes, - output_dim=2, - linear_weights=[[1.0, 2.0], [2.0, 4.0]]) - expected_outputs = [[-0.75, -1.5], [-0.25, -0.5], [-0.05, -0.1], - [0.75, 1.5]] - self._testLatticeLayerEvaluation( - interpolation_type='simplex', - output_dim=output_dim, - lattice_sizes=lattice_sizes, - inputs=inputs, - parameters=parameters, - expected_outputs=expected_outputs) - - def testHypercubeNoRegularizationExpectsNone(self): - lattice_sizes = [2, 3] - with tf.Graph().as_default(): - input_tensor = tf.compat.v1.placeholder(shape=[None, 2], dtype=tf.float32) - (_, _, _, regularization) = lattice_layers.lattice_layer( - input_tensor, - lattice_sizes=lattice_sizes, - output_dim=1, - interpolation_type='hypercube') - self.assertEqual(regularization, None) - - def testSimplexNoRegularizationExpectsNone(self): - lattice_sizes = [2, 3] - with tf.Graph().as_default(): - input_tensor = tf.compat.v1.placeholder(shape=[None, 2], dtype=tf.float32) - (_, _, _, regularization) = lattice_layers.lattice_layer( - input_tensor, - lattice_sizes=lattice_sizes, - output_dim=1, - interpolation_type='simplex') - self.assertEqual(regularization, None) - - def testHypercubeRegularization(self): - lattice_sizes = [2, 3] - parameters = [[0.0, 0.1, 1.1, 2.3, 3.1, 4.2], - [5.1, 2.11, 1.11, 3.21, -1.02, -2.2]] - output_dim = 2 - with tf.Graph().as_default(): - input_tensor = tf.compat.v1.placeholder(shape=[None, 2], dtype=tf.float32) - init_param = tf.constant(parameters, dtype=tf.float32) - (_, _, _, regularization) = 
lattice_layers.lattice_layer( - input_tensor, - lattice_sizes=lattice_sizes, - output_dim=output_dim, - interpolation_type='hypercube', - l1_reg=0.1, - l2_reg=0.1, - l1_torsion_reg=0.1, - l2_torsion_reg=0.1, - l1_laplacian_reg=[0.1, 0.1], - l2_laplacian_reg=[0.1, 0.1], - lattice_initializer=init_param) - - with self.session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - self.assertAlmostEqual(26.514278, sess.run(regularization), delta=1e-5) - - def testSimplexRegularization(self): - lattice_sizes = [2, 3] - parameters = [[0.0, 0.1, 1.1, 2.3, 3.1, 4.2], - [5.1, 2.11, 1.11, 3.21, -1.02, -2.2]] - output_dim = 2 - with tf.Graph().as_default(): - input_tensor = tf.compat.v1.placeholder(shape=[None, 2], dtype=tf.float32) - init_param = tf.constant(parameters, dtype=tf.float32) - (_, _, _, regularization) = lattice_layers.lattice_layer( - input_tensor, - lattice_sizes=lattice_sizes, - output_dim=output_dim, - interpolation_type='simplex', - l1_reg=0.1, - l2_reg=0.1, - l1_torsion_reg=0.1, - l2_torsion_reg=0.1, - l1_laplacian_reg=[0.1, 0.1], - l2_laplacian_reg=[0.1, 0.1], - lattice_initializer=init_param) - - with self.session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - self.assertAlmostEqual(26.514278, sess.run(regularization), delta=1e-5) - - def _testLatticeLayerProjection(self, interpolation_type, lattice_sizes, - output_dim, output_min, output_max, - is_monotone, parameters, - expected_projected_parameters): - """Test monotonicity projection of lattice layers.""" - with tf.Graph().as_default(): - input_tensor = tf.zeros([1, len(lattice_sizes)], dtype=tf.float32) - (_, param_tensor, projection_op, _) = lattice_layers.lattice_layer( - input_tensor, - lattice_sizes=lattice_sizes, - is_monotone=is_monotone, - output_dim=output_dim, - output_min=output_min, - output_max=output_max, - interpolation_type=interpolation_type) - with self.session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - sess.run( - tf.compat.v1.assign(param_tensor, tf.constant(parameters, dtype=tf.float32))) - sess.run(projection_op) - param_tensor_values = param_tensor.eval() - - self.assertAllClose( - param_tensor_values, expected_projected_parameters, atol=1e-4) - - def testProjectionWithNonMonotonicHypercube(self): - parameters = [[0.0, 0.1, 1.1, 2.3, 3.1, 4.2], - [5.1, 2.11, 1.11, 3.21, -1.02, -2.2]] - expected_projected_parameters = parameters - self._testLatticeLayerProjection( - interpolation_type='hypercube', - is_monotone=[False, False], - output_dim=2, - output_min=None, - output_max=None, - lattice_sizes=[2, 3], - parameters=parameters, - expected_projected_parameters=expected_projected_parameters) - - def testProjectionWithNonMonotonicSimplex(self): - parameters = [[0.0, 0.1, 1.1, 2.3, 3.1, 4.2], - [5.1, 2.11, 1.11, 3.21, -1.02, -2.2]] - expected_projected_parameters = parameters - self._testLatticeLayerProjection( - interpolation_type='simplex', - is_monotone=[False, False], - output_dim=2, - output_min=None, - output_max=None, - lattice_sizes=[2, 3], - parameters=parameters, - expected_projected_parameters=expected_projected_parameters) - - def testProjectionWithFullMonotonicHypercube(self): - parameters = [[0.0, 0.1, 1.1, 2.3, 3.1, 4.2], - [5.1, 2.11, 1.11, 3.21, -1.02, -2.2]] - expected_projected_parameters = [[0, 0.1, 1.1, 2.3, 3.1, 4.2], - [1.385, 1.385, 1.385, 1.385, 1.385, 1.385]] - self._testLatticeLayerProjection( - interpolation_type='hypercube', - is_monotone=[True, True], - output_dim=2, - output_min=None, - output_max=None, - 
lattice_sizes=[2, 3], - parameters=parameters, - expected_projected_parameters=expected_projected_parameters) - - def testProjectionWithFullMonotonicSimplex(self): - parameters = [[0.0, 0.1, 1.1, 2.3, 3.1, 4.2], - [5.1, 2.11, 1.11, 3.21, -1.02, -2.2]] - expected_projected_parameters = [[0, 0.1, 1.1, 2.3, 3.1, 4.2], - [1.385, 1.385, 1.385, 1.385, 1.385, 1.385]] - self._testLatticeLayerProjection( - interpolation_type='simplex', - is_monotone=[True, True], - output_dim=2, - output_min=None, - output_max=None, - lattice_sizes=[2, 3], - parameters=parameters, - expected_projected_parameters=expected_projected_parameters) - - def testProjectionWithBoundedFullMonotonicHypercube(self): - parameters = [[0.0, 0.1, 1.1, 2.3, 3.1, 4.2], - [5.1, 2.11, 1.11, 3.21, -1.02, -2.2]] - expected_projected_parameters = [[0.3, 0.3, 1.1, 2.3, 3.0, 3.0], - [1.385, 1.385, 1.385, 1.385, 1.385, 1.385]] - self._testLatticeLayerProjection( - interpolation_type='hypercube', - is_monotone=[True, True], - output_dim=2, - output_min=0.3, - output_max=3.0, - lattice_sizes=[2, 3], - parameters=parameters, - expected_projected_parameters=expected_projected_parameters) - - -class EnsembleLatticesLayersTestCase(tf.test.TestCase): - - def _testEnsembleLatticesLayerEvaluation(self, interpolation_type, - lattice_sizes, structure, output_dim, - inputs, parameters, - expected_outputs_list): - """Test evaluation of ensemble lattices layers.""" - with tf.Graph().as_default(): - input_tensor = tf.constant(inputs, dtype=tf.float32) - init_params = [ - tf.constant(param, dtype=tf.float32) for param in parameters - ] - (output_tensor_lists, _, _, _) = lattice_layers.ensemble_lattices_layer( - input_tensor, - lattice_sizes=lattice_sizes, - structure_indices=structure, - output_dim=output_dim, - interpolation_type=interpolation_type, - lattice_initializers=init_params) - self.assertEqual(len(output_tensor_lists), len(structure)) - with self.session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - output_values_list = sess.run(output_tensor_lists) - self.assertAllClose(output_values_list, expected_outputs_list) - - def testHypercubeEvaluation(self): - inputs = [[-1.0, 0.0], [0.0, 0.0], [0.1, 0.9], [0.3, 1.1], [1.5, 2.0], - [1.6, 3.0]] - structure = [[0], [1], [0, 1]] - - # Construct params. - parameters = [] - # First one is 1d lattice with two outputs: - # output[0] = x[0], output[1] = 1-x[0]. - parameters.append([[0.0, 1.0], [1.0, 0.0]]) - # Second one is 1d lattice with two outputs: - # output[0] = x[1] for 1 <= x[1] <= 2, 0 otherwise - # output[1] = 1 - x[1] for 0 <= x[1] <= 1, 0 otherwise. - parameters.append([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]]) - # Third one is 2d lattice. - parameters.append([[0.0, 0.1, 1.1, 2.3, 3.1, 4.2], - [5.1, 2.11, 1.11, 3.21, -1.02, -2.2]]) - - # Construct expected outputs. - expected_outputs = [] - # Expected outputs from the first lattice. - expected_outputs.append([[0.0, 1.0], [0.0, 1.0], [0.1, 0.9], [0.3, 0.7], - [1.0, 0.0], [1.0, 0.0]]) - # Expected outputs from the second lattice. - expected_outputs.append([[0.0, 1.0], [0.0, 1.0], [0.0, 0.1], [0.1, 0.0], - [1.0, 0.0], [1.0, 0.0]]) - # Expected outputs from the third lattice. 
- expected_outputs.append([[0.0, 5.1], [0.0, 5.1], [1.099, 1.6681], - [1.657, 1.4286], [4.2, -2.2], [4.2, -2.2]]) - - self._testEnsembleLatticesLayerEvaluation( - interpolation_type='hypercube', - structure=structure, - output_dim=2, - lattice_sizes=[2, 3], - inputs=inputs, - parameters=parameters, - expected_outputs_list=expected_outputs) - - def testSimplexEvaluation(self): - inputs = [[-1.0, 0.0], [0.0, 0.0], [0.1, 0.9], [0.3, 1.1], [1.5, 2.0], - [1.6, 3.0]] - structure = [[0], [1], [0, 1]] - - # Construct params. - parameters = [] - # First one is 1d lattice with two outputs: - # output[0] = x[0], output[1] = 1 - x[0]. - parameters.append([[0.0, 1.0], [1.0, 0.0]]) - # Second one is 1d lattice with two outputs: - # output[0] = x[1] for 1 <= x[1] <= 2, 0 otherwise - # output[1] = 1 - x[1] for 0 <= x[1] <= 1, 0 otherwise. - parameters.append([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]]) - # Third one is 2d lattice with two outputs. - parameters.append([[0.0, 0.1, 1.1, 2.3, 3.1, 4.2], - [5.1, 2.11, 1.11, 3.21, -1.02, -2.2]]) - - # Construct expected outputs. - expected_outputs = [] - # Expected outputs from the first lattice. - expected_outputs.append([[0.0, 1.0], [0.0, 1.0], [0.1, 0.9], [0.3, 0.7], - [1.0, 0.0], [1.0, 0.0]]) - # Expected outputs from the second lattice. - expected_outputs.append([[0.0, 1.0], [0.0, 1.0], [0.0, 0.1], [0.1, 0.0], - [1.0, 0.0], [1.0, 0.0]]) - # Expected outputs from the third lattice. - expected_outputs.append([[0.0, 5.1], [0.0, 5.1], [1.11, 1.719], - [1.65, 1.199], [4.2, -2.2], [4.2, -2.2]]) - - self._testEnsembleLatticesLayerEvaluation( - interpolation_type='simplex', - structure=structure, - output_dim=2, - lattice_sizes=[2, 3], - inputs=inputs, - parameters=parameters, - expected_outputs_list=expected_outputs) - - def testHypercubeRegularization(self): - lattice_sizes = [2, 3] - structure = [[0], [1], [0, 1]] - # Construct params. - parameters = [] - parameters.append([[0.0, 1.0], [1.0, 0.0]]) - parameters.append([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]]) - parameters.append([[0.0, 0.1, 1.1, 2.3, 3.1, 4.2], - [5.1, 2.11, 1.11, 3.21, -1.02, -2.2]]) - output_dim = 2 - with tf.Graph().as_default(): - input_tensor = tf.compat.v1.placeholder(shape=[None, 2], dtype=tf.float32) - init_params = [ - tf.constant(param, dtype=tf.float32) for param in parameters - ] - (_, _, _, regularization) = lattice_layers.ensemble_lattices_layer( - input_tensor, - lattice_sizes=lattice_sizes, - structure_indices=structure, - output_dim=output_dim, - interpolation_type='hypercube', - l1_reg=0.1, - l2_reg=0.1, - l1_torsion_reg=0.1, - l2_torsion_reg=0.1, - l1_laplacian_reg=[0.1, 0.1], - l2_laplacian_reg=[0.1, 0.1], - lattice_initializers=init_params) - - with self.session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - self.assertAlmostEqual(28.114279, sess.run(regularization), delta=1e-5) - - def testSimplexRegularization(self): - lattice_sizes = [2, 3] - structure = [[0], [1], [0, 1]] - # Construct params. 
- parameters = [] - parameters.append([[0.0, 1.0], [1.0, 0.0]]) - parameters.append([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]]) - parameters.append([[0.0, 0.1, 1.1, 2.3, 3.1, 4.2], - [5.1, 2.11, 1.11, 3.21, -1.02, -2.2]]) - output_dim = 2 - with tf.Graph().as_default(): - input_tensor = tf.compat.v1.placeholder(shape=[None, 2], dtype=tf.float32) - init_params = [ - tf.constant(param, dtype=tf.float32) for param in parameters - ] - (_, _, _, regularization) = lattice_layers.ensemble_lattices_layer( - input_tensor, - lattice_sizes=lattice_sizes, - structure_indices=structure, - output_dim=output_dim, - interpolation_type='simplex', - l1_reg=0.1, - l2_reg=0.1, - l1_torsion_reg=0.1, - l2_torsion_reg=0.1, - l1_laplacian_reg=[0.1, 0.1], - l2_laplacian_reg=[0.1, 0.1], - lattice_initializers=init_params) - - with self.session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - self.assertAlmostEqual(28.114279, sess.run(regularization), delta=1e-5) - - def _testEnsembleLatticesLayerProjection(self, interpolation_type, - lattice_sizes, structure, output_dim, - is_monotone, parameters, - expected_projected_parameters): - """Test monotonicity projection of lattice layers.""" - with tf.Graph().as_default(): - input_tensor = tf.zeros([1, len(lattice_sizes)], dtype=tf.float32) - (_, param_tensors, proj, _) = lattice_layers.ensemble_lattices_layer( - input_tensor, - structure_indices=structure, - lattice_sizes=lattice_sizes, - is_monotone=is_monotone, - output_dim=output_dim, - lattice_initializers=parameters, - interpolation_type=interpolation_type) - with self.session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - # Check initialization. - param_tensor_values = sess.run(param_tensors) - self.assertEqual(len(param_tensor_values), len(parameters)) - for (param_value, expected_value) in zip(param_tensor_values, - parameters): - self.assertAllClose(param_value, expected_value, atol=1e-4) - # Check projection. 
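-      # Running the returned projection ops should move each lattice's
-      # parameters into the feasible set (monotonicity and, when requested,
-      # output bounds), while leaving already-feasible parameters unchanged.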
- sess.run(proj) - param_tensor_values = sess.run(param_tensors) - self.assertEqual( - len(param_tensor_values), len(expected_projected_parameters)) - for (param_value, expected_value) in zip(param_tensor_values, - expected_projected_parameters): - self.assertAllClose(param_value, expected_value, atol=1e-4) - - def testProjectionWithNonMonotonicHypercube(self): - structure = [[0], [0, 1]] - params = [] - params.append([[0.0, 1.0], [1.0, -1.0]]) - params.append([[0.0, 0.1, 1.1, 2.3, 3.1, 4.2], - [5.1, 2.11, 1.11, 3.21, -1.02, -2.2]]) - expected_proj_params = params - self._testEnsembleLatticesLayerProjection( - interpolation_type='hypercube', - structure=structure, - is_monotone=[False, False], - output_dim=2, - lattice_sizes=[2, 3], - parameters=params, - expected_projected_parameters=expected_proj_params) - - def testProjectionWithNonMonotonicSimplex(self): - structure = [[0], [0, 1]] - params = [] - params.append([[0.0, 1.0], [1.0, -1.0]]) - params.append([[0.0, 0.1, 1.1, 2.3, 3.1, 4.2], - [5.1, 2.11, 1.11, 3.21, -1.02, -2.2]]) - expected_proj_params = params - self._testEnsembleLatticesLayerProjection( - interpolation_type='hypercube', - structure=structure, - is_monotone=[False, False], - output_dim=2, - lattice_sizes=[2, 3], - parameters=params, - expected_projected_parameters=expected_proj_params) - - def testProjectionWithFullMonotonicHypercube(self): - structure = [[0], [0, 1]] - params = [] - params.append([[0.0, -10.0], [0.0, 5.0]]) - params.append([[0.0, 0.1, 1.1, 2.3, 3.1, 4.2], - [5.1, 2.11, 1.11, 3.21, -1.02, -2.2]]) - expected_proj_params = [] - expected_proj_params.append([[-5.0, -5.0], [0.0, 5.0]]) - expected_proj_params.append([[0, 0.1, 1.1, 2.3, 3.1, 4.2], - [1.385, 1.385, 1.385, 1.385, 1.385, 1.385]]) - self._testEnsembleLatticesLayerProjection( - interpolation_type='hypercube', - structure=structure, - is_monotone=[True, True], - output_dim=2, - lattice_sizes=[2, 3], - parameters=params, - expected_projected_parameters=expected_proj_params) - - def testProjectionWithFullMonotonicSimplex(self): - structure = [[0], [0, 1]] - params = [] - params.append([[0.0, -10.0], [0.0, 5.0]]) - params.append([[0.0, 0.1, 1.1, 2.3, 3.1, 4.2], - [5.1, 2.11, 1.11, 3.21, -1.02, -2.2]]) - expected_proj_params = [] - expected_proj_params.append([[-5.0, -5.0], [0.0, 5.0]]) - expected_proj_params.append([[0, 0.1, 1.1, 2.3, 3.1, 4.2], - [1.385, 1.385, 1.385, 1.385, 1.385, 1.385]]) - self._testEnsembleLatticesLayerProjection( - interpolation_type='simplex', - structure=structure, - is_monotone=True, - output_dim=2, - lattice_sizes=[2, 3], - parameters=params, - expected_projected_parameters=expected_proj_params) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_lattice/python/lib/monotone_linear_layers.py b/tensorflow_lattice/python/lib/monotone_linear_layers.py deleted file mode 100644 index 6fd0750..0000000 --- a/tensorflow_lattice/python/lib/monotone_linear_layers.py +++ /dev/null @@ -1,278 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Monotonic linear embedding layers library for TensorFlow."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import tensorflow as tf
-
-from tensorflow_lattice.python.lib import regularizers
-from tensorflow_lattice.python.lib import tools
-
-
-def monotone_linear_layer(input_tensor,
-                          input_dim,
-                          output_dim,
-                          is_monotone=None,
-                          add_bias=True,
-                          normalization_order=None,
-                          init_weight_mean=2.0,
-                          init_weight_stddev=0.5,
-                          init_bias=None,
-                          l1_reg=None,
-                          l2_reg=None):
-  """Creates a partially monotonic linear embedding layer.
-
-  Returns the output of a partially monotonic linear embedding layer, the
-  weights in the linear embedding layer, projection ops and regularizers.
-
-    output = input * weight' + bias
-
-  The weights multiplying input_tensor[:, k] are constrained to be
-  non-negative if is_monotone[k] == True. weight is initialized entrywise
-  from a Normal(init_weight_mean, init_weight_stddev) random variable. If
-  init_bias is not provided, then the initial bias is set to
-  -1/2 * init_weight_mean * input_dim. This offset term makes the initial
-  output mean 0, assuming each input is drawn from the uniform distribution
-  on [0, 1]:
-    E[output] = E[input * weight' + bias] = E[input] * E[weight]' + bias
-              = 1/2 * init_weight_mean * input_dim + bias
-              = 0.
-
-  Args:
-    input_tensor: [batch_size, input_dim] tensor.
-    input_dim: (int) input dimension.
-    output_dim: (int) output dimension.
-    is_monotone: A list of input_dim booleans, a single boolean, or None. If
-      None or False, linear layer will not have monotonicity constraints. If
-      True, all of the inputs are set to be monotonic. In the case of a
-      boolean list, input_tensor[:, k] is set to be monotonic if
-      is_monotone[k] == True.
-    add_bias: (bool) If a bias term should be added.
-    normalization_order: If specified, the returned projection will normalize
-      the weight vector across each output dimension to have norm 1. The norm
-      order can be 1, 2 or np.inf. Norm is lower bounded by 1e-12.
-    init_weight_mean: (float) A mean for Normal random weight initializer.
-    init_weight_stddev: (float) A standard deviation for Normal random weight
-      initializer.
-    init_bias: (float) initial bias. If not provided, -1/2 * init_weight_mean *
-      input_dim is used.
-    l1_reg: (float) amount of l1 regularization.
-    l2_reg: (float) amount of l2 regularization.
-
-  Returns:
-    A tuple of:
-    * output tensor of shape [batch_size, output_dim]
-    * weight tensor of shape [output_dim, input_dim]
-    * None or projection ops, that must be applied at each
-      step (or every so many steps) to project the model to a feasible space:
-      used for bounding the outputs or for imposing monotonicity.
-    * None or a regularization loss, if regularization is configured.
-
-  Raises:
-    ValueError: If is_monotone is not None, but its length != input_dim.
-  """
-  with tf.compat.v1.variable_scope('monotone_linear'):
-    # We use the [output_dim, input_dim] convention to allow broadcasting in
-    # the projection.
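-    # Worked example of the initialization described above: with input_dim=4
-    # and the default init_weight_mean=2.0, the default bias is
-    # -0.5 * 2.0 * 4 = -4.0, which cancels the expected pre-bias output
-    # 0.5 * 2.0 * 4 = 4.0 for uniform [0, 1] inputs.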
- init_weights = tf.random.normal([output_dim, input_dim], - mean=init_weight_mean, - stddev=init_weight_stddev) - if init_bias is None: - init_biases = [-init_weight_mean * 0.5 * input_dim] * output_dim - else: - init_biases = [init_bias] * output_dim - - w = tf.compat.v1.get_variable( - name='weight', initializer=init_weights, dtype=input_tensor.dtype) - output_tensor = tf.matmul(input_tensor, w, transpose_b=True) - if add_bias: - b = tf.compat.v1.get_variable( - name='bias', initializer=init_biases, dtype=input_tensor.dtype) - output_tensor = output_tensor + b - - # Constructing a projection op. - projection = None - if is_monotone or normalization_order: - with tf.name_scope('monotonic_projection'): - diff = None - if is_monotone: - if isinstance(is_monotone, list): - # is_monotone is given as a list. We should only apply positivity - # constraints to a masked version of the weights. - if input_dim != len(is_monotone): - raise ValueError('input_dim (%d) != is_monotone length (%d)' % - (input_dim, len(is_monotone))) - # Construct a multiplicative mask for monotonic dimension - # selection. - monotone_mask = tf.constant( - [1.0 if monotone else 0.0 for monotone in is_monotone], - dtype=w.dtype) - # Since input_dim is the last dimension of the weight, we can use - # broadcasting. - masked_w = tf.multiply(w, monotone_mask) - else: - # is_monotone is set to True. - masked_w = w - - projected_w = tf.maximum(masked_w, 0.0) - diff = projected_w - masked_w - - if normalization_order: - unnormalized_w = w if diff is None else w + diff - normalized_w = unnormalized_w / tf.maximum( - tf.norm( - unnormalized_w, - ord=normalization_order, - axis=1, - keepdims=True), 1e-12) - diff = normalized_w - w - - projection = w.assign_add(diff) - - # Constructing a regularization op. - regularizer = None - if l1_reg is not None or l2_reg is not None: - with tf.name_scope('linear_regularization'): - regularizer = regularizers.linear_regularization(w, l1_reg, l2_reg) - - return (output_tensor, w, projection, regularizer) - - -def split_monotone_linear_layer(input_tensor, - input_dim, - monotonic_output_dim, - non_monotonic_output_dim, - is_monotone=None, - init_weight_mean=2.0, - init_weight_stddev=0.5, - init_bias=None, - l1_reg=None, - l2_reg=None): - """Creates a split monotonic linear embedding layer. - - Returns outputs of partially monotonic linear embedding layers, weights in - the linear embedding layers, projection ops and regularizers. This function - splits monotonic and non-monotonic input based on is_monotone, and creates - two separate linear embedding in the following form: - - monotonic_output = monotonic_input * monotonic_weight - + non-monotonic_input * nm_weight - + bias - non_monotonic_output = non-monotonic_input * nn_weight + bias - - where monotonic_weight has to be non-negative. All elements in - monotonic_output should be treated as a monotonic signal, otherwise there - would be no monotonicity guarantee. - Weights are initialized as in monotone_linear_layer. - - Args: - input_tensor: [batch_size, input_dim] tensor. - input_dim: (int) input dimension. - monotonic_output_dim: (int) monotonic_output's dimension. - non_monotonic_output_dim: (int) non_monotonic_output's dimension. - is_monotone: A list of input_dim booleans, or None. If None, all inputs are - set to be non-monotonic. In a boolean list case, the input_tensor[:, k] - is set to be monotonic input if is_monotone[k] == True. - init_weight_mean: (float) A mean for Normal random weight initializer. 
-    init_weight_stddev: (float) A standard deviation for Normal random weight
-      initializer.
-    init_bias: (float) initial bias. If not provided,
-      -1/2 * init_weight_mean * input_dim is used.
-    l1_reg: (float) amount of l1 regularization.
-    l2_reg: (float) amount of l2 regularization.
-
-  Returns:
-    A tuple of:
-    * monotonic_output tensor of shape [batch_size, monotonic_output_dim]
-      or None if monotonic_output_dim == 0.
-    * monotonic output's weight tensor of shape
-      [input_dim, monotonic_output_dim] or None if monotonic_output_dim == 0.
-    * non_monotonic_output tensor of shape
-      [batch_size, non_monotonic_output_dim] or None if
-      non_monotonic_output_dim == 0.
-    * non_monotonic_output's weight tensor of shape
-      [non_monotonic_input_dim, non_monotonic_output_dim] or None if
-      non_monotonic_output_dim == 0.
-    * None or projection ops, that must be applied at each
-      step (or every so many steps) to project the model to a feasible space:
-      used for bounding the outputs or for imposing monotonicity.
-    * None or a regularization loss, if regularization is configured.
-
-  Raises:
-    ValueError: * If is_monotone is neither None nor a list.
-      * If is_monotone is a list but its length != input_dim.
-      * If all values in is_monotone are True, but non_monotonic_output_dim
-        is not 0.
-  """
-  monotonic_output = None
-  m_weight = None
-  non_monotonic_output = None
-  n_weight = None
-  projections = []
-  regularization = None
-  if monotonic_output_dim > 0:
-    with tf.compat.v1.variable_scope('split_monotone'):
-      packed_results = monotone_linear_layer(
-          input_tensor,
-          input_dim=input_dim,
-          output_dim=monotonic_output_dim,
-          is_monotone=is_monotone,
-          init_weight_mean=init_weight_mean,
-          init_weight_stddev=init_weight_stddev,
-          init_bias=init_bias,
-          l1_reg=l1_reg,
-          l2_reg=l2_reg)
-      (monotonic_output, m_weight, projection, regularizer) = packed_results
-      projections.append(projection)
-      regularization = tools.add_if_not_none(regularization, regularizer)
-
-  if non_monotonic_output_dim > 0:
-    with tf.compat.v1.variable_scope('split_non_monotone'):
-      # Construct non_monotone_input_tensor.
-      if is_monotone is None:
-        non_monotone_input_tensor = input_tensor
-      else:
-        if not isinstance(is_monotone, list):
-          raise ValueError('is_monotone should be None or a list of booleans')
-        if len(is_monotone) != input_dim:
-          raise ValueError('input_dim (%d) != is_monotone length (%d)' %
-                           (input_dim, len(is_monotone)))
-
-        input_columns = tf.unstack(input_tensor, axis=1)
-        non_monotone_columns = []
-        for (monotone, input_column) in zip(is_monotone, input_columns):
-          if not monotone:
-            non_monotone_columns.append(input_column)
-        if not non_monotone_columns:
-          raise ValueError(
-              'non_monotonic_output_dim is not None nor zero, but all inputs '
-              'are required to be monotonic.')
-        non_monotone_input_tensor = tf.stack(non_monotone_columns, axis=1)
-      # Create a linear embedding.
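-      # The non-monotone tower reuses monotone_linear_layer with
-      # is_monotone=None, so it produces no projection op; only its
-      # regularizer is collected below.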
- packed_results = monotone_linear_layer( - non_monotone_input_tensor, - input_dim=len(non_monotone_columns), - output_dim=non_monotonic_output_dim, - is_monotone=None, - init_weight_mean=init_weight_mean, - init_weight_stddev=init_weight_stddev, - init_bias=init_bias, - l1_reg=l1_reg, - l2_reg=l2_reg) - (non_monotonic_output, n_weight, _, regularizer) = packed_results - regularization = tools.add_if_not_none(regularization, regularizer) - - return (monotonic_output, m_weight, non_monotonic_output, n_weight, - projections, regularization) diff --git a/tensorflow_lattice/python/lib/monotone_linear_layers_test.py b/tensorflow_lattice/python/lib/monotone_linear_layers_test.py deleted file mode 100644 index a766afa..0000000 --- a/tensorflow_lattice/python/lib/monotone_linear_layers_test.py +++ /dev/null @@ -1,353 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for TensorFlow Lattice's monotone_linear_layers module.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -from tensorflow_lattice.python.lib import monotone_linear_layers - - -class MonotoneLinearTestCase(tf.test.TestCase): - - def testEvaluationWithZeroBias(self): - """Create a partial monotone linear layer and check evaluation.""" - input_placeholder = tf.compat.v1.placeholder( - dtype=tf.float32, shape=[None, 3]) - input_tensor = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] - sum_input_tensor = [[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]] - # Check linearity of the output tensor. - # f(input_tensor + input_tensor) = 2 * f(input_tensor) - # since the bias is 0. - packed_results = monotone_linear_layers.monotone_linear_layer( - input_placeholder, input_dim=3, output_dim=5, init_bias=0.0) - (output_tensor, _, _, _) = packed_results - with self.session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - # Check linearity of the output tensor. - # f(input_tensor + input_tensor) = 2 * f(input_tensor) - # since the bias is 0. - output_val = sess.run( - output_tensor, feed_dict={input_placeholder: input_tensor}) - sum_output_val = sess.run( - output_tensor, feed_dict={input_placeholder: sum_input_tensor}) - expected_sum_output_val = 2 * output_val - self.assertAllClose(expected_sum_output_val, sum_output_val) - - def testEvaluationWithDefaultBias(self): - """Create a partial monotone linear layer and check the bias.""" - input_dim = 10 - input_placeholder = tf.compat.v1.placeholder( - dtype=tf.float32, shape=[None, input_dim]) - # Monotone linear layers contain random weights and for this input_tensor - # we expect 0 as an output on "average". In order to control randomness, we - # set the standard deviation exactly zero. 
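-    # With stddev 0 every weight equals the default init_weight_mean=2.0, and
-    # the default bias is -0.5 * 2.0 * 10 = -10.0, so the all-0.5 input row
-    # maps exactly to 0.5 * 2.0 * 10 - 10.0 = 0.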
-    input_tensor = [[0.5] * input_dim]
-    expected_output_val = [[0.0]]
-    packed_results = monotone_linear_layers.monotone_linear_layer(
-        input_placeholder,
-        input_dim=input_dim,
-        output_dim=1,
-        init_weight_stddev=0.0)
-    (output_tensor, _, _, _) = packed_results
-    with self.session() as sess:
-      sess.run(tf.compat.v1.global_variables_initializer())
-      # With the default bias and zero-stddev weights, the all-0.5 input
-      # should map exactly to 0.
-      output_val = sess.run(
-          output_tensor, feed_dict={input_placeholder: input_tensor})
-      self.assertAllClose(expected_output_val, output_val)
-
-  def testProjection(self):
-    """Create a partial monotone linear layer and check the projection."""
-    input_dim = 10
-    is_monotone = [True, False] * 5
-    input_placeholder = tf.compat.v1.placeholder(
-        dtype=tf.float32, shape=[None, input_dim])
-    # We set the initial_weight_mean to -10.0. After projection, we expect
-    # the elements corresponding to monotonic inputs to become 0.
-    packed_results = monotone_linear_layers.monotone_linear_layer(
-        input_placeholder,
-        input_dim=input_dim,
-        output_dim=2,
-        is_monotone=is_monotone,
-        init_weight_mean=-10.0,
-        init_weight_stddev=0.0)
-    (_, weight_tensor, projection_op, _) = packed_results
-    # The weight is in shape (output_dim, input_dim).
-    expected_pre_projection_weight = [[-10.0] * 10] * 2
-    expected_projected_weight = [[0.0, -10.0] * 5] * 2
-    with self.session() as sess:
-      sess.run(tf.compat.v1.global_variables_initializer())
-      pre_projection_weight = sess.run(weight_tensor)
-      sess.run(projection_op)
-      projected_weight = sess.run(weight_tensor)
-      self.assertAllClose(expected_pre_projection_weight, pre_projection_weight)
-      self.assertAllClose(expected_projected_weight, projected_weight)
-
-  def testNormalizationProjection(self):
-    """Test projection when l1 normalization is requested."""
-    input_dim = 10
-    is_monotone = [True, False] * 5
-    input_placeholder = tf.compat.v1.placeholder(
-        dtype=tf.float32, shape=[None, input_dim])
-    # We set the initial_weight_mean to -10.0. After projection, we expect
-    # the elements corresponding to monotonic inputs to become 0 and each
-    # output row to be l1-normalized.
-    packed_results = monotone_linear_layers.monotone_linear_layer(
-        input_placeholder,
-        input_dim=input_dim,
-        output_dim=2,
-        is_monotone=is_monotone,
-        init_weight_mean=-10.0,
-        init_weight_stddev=0.0,
-        add_bias=False,
-        normalization_order=1,
-    )
-    (_, weight_tensor, projection_op, _) = packed_results
-    # The weight is in shape (output_dim, input_dim).
-    expected_pre_projection_weight = [[-10.0] * 10] * 2
-    expected_projected_weight = [[0.0, -0.2] * 5] * 2
-    with self.session() as sess:
-      sess.run(tf.compat.v1.global_variables_initializer())
-      pre_projection_weight = sess.run(weight_tensor)
-      sess.run(projection_op)
-      projected_weight = sess.run(weight_tensor)
-      self.assertAllClose(expected_pre_projection_weight, pre_projection_weight)
-      self.assertAllClose(expected_projected_weight, projected_weight)
-
-  def testNoRegularizationExpectsNone(self):
-    """Create a monotone linear layer and check no regularization."""
-    input_dim = 10
-    input_placeholder = tf.compat.v1.placeholder(
-        dtype=tf.float32, shape=[None, input_dim])
-    # We set the initial_weight_mean to -10.0.
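-    # No l1_reg or l2_reg is passed, so the layer is expected to return
-    # regularization=None rather than a zero-valued tensor.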
- (_, _, _, regularization) = monotone_linear_layers.monotone_linear_layer( - input_placeholder, - input_dim=input_dim, - output_dim=2, - init_weight_mean=-10.0, - init_weight_stddev=0.0) - self.assertIsNone(regularization) - - def testRegularization(self): - """Create a monotone linear layer and check regularization.""" - input_dim = 10 - input_placeholder = tf.compat.v1.placeholder( - dtype=tf.float32, shape=[None, input_dim]) - # We set the initial_weight_mean to -10.0. - (_, _, _, regularization) = monotone_linear_layers.monotone_linear_layer( - input_placeholder, - input_dim=input_dim, - output_dim=2, - init_weight_mean=-10.0, - init_weight_stddev=0.0, - l1_reg=0.1, - l2_reg=0.1) - with self.session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - self.assertAlmostEqual(220.0, sess.run(regularization), delta=1e-5) - - -class SplitMonotoneLinearTestCase(tf.test.TestCase): - - def testEvaluation(self): - """Create a split monotone linear layer and check the results.""" - batch_size = 5 - input_dim = 10 - monotonic_output_dim = 2 - non_monotonic_output_dim = 3 - # First five is monotonic, and the last five is non-monotonic. - is_monotone = [True] * 5 + [False] * 5 - input_placeholder = tf.compat.v1.placeholder( - dtype=tf.float32, shape=[batch_size, input_dim]) - packed_results = monotone_linear_layers.split_monotone_linear_layer( - input_placeholder, - input_dim=input_dim, - monotonic_output_dim=monotonic_output_dim, - non_monotonic_output_dim=non_monotonic_output_dim, - is_monotone=is_monotone) - (monotonic_output, _, non_monotonic_output, _, _, _) = packed_results - - # Check the shape of outputs. - self.assertAllEqual(monotonic_output.shape, - [batch_size, monotonic_output_dim]) - self.assertAllEqual(non_monotonic_output.shape, - [batch_size, non_monotonic_output_dim]) - - # Check monotonic inputs are not part of non_monotonic_output. - # We do this by changing the first half of inputs and check whether it - # changes the value or not. - zero_input = [[0.0] * 10] * 5 - identity_in_monotone_inputs = [ - [1.0, 0.0, 0.0, 0.0, 0.0] + [0.0] * 5, - [0.0, 1.0, 0.0, 0.0, 0.0] + [0.0] * 5, - [0.0, 0.0, 1.0, 0.0, 0.0] + [0.0] * 5, - [0.0, 0.0, 0.0, 1.0, 0.0] + [0.0] * 5, - [0.0, 0.0, 0.0, 0.0, 1.0] + [0.0] * 5, - ] - - with self.session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - non_monotonic_output_at_zero = sess.run( - non_monotonic_output, feed_dict={input_placeholder: zero_input}) - non_monotonic_output_at_identity = sess.run( - non_monotonic_output, - feed_dict={input_placeholder: identity_in_monotone_inputs}) - - self.assertAllClose(non_monotonic_output_at_zero, - non_monotonic_output_at_identity) - - def testProjection(self): - """Check projection operator.""" - input_dim = 2 - monotonic_output_dim = 2 - non_monotonic_output_dim = 1 - # First five is monotonic, and the last five is non-monotonic. 
- is_monotone = [True, False] - input_placeholder = tf.compat.v1.placeholder( - dtype=tf.float32, shape=[None, input_dim]) - packed_results = monotone_linear_layers.split_monotone_linear_layer( - input_placeholder, - input_dim=input_dim, - monotonic_output_dim=monotonic_output_dim, - non_monotonic_output_dim=non_monotonic_output_dim, - is_monotone=is_monotone, - init_weight_mean=-10.0, - init_weight_stddev=0.0) - (_, monotone_weights, _, non_monotone_weights, proj, _) = packed_results - - expected_pre_monotone_weights = [[-10.0, -10.0]] * 2 - expected_pre_non_monotone_weights = [[-10.0]] - expected_projected_monotone_weights = [[0.0, -10.0]] * 2 - expected_projected_non_monotone_weights = [[-10.0]] - - with self.session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - self.assertAllClose(expected_pre_monotone_weights, - monotone_weights.eval()) - self.assertAllClose(expected_pre_non_monotone_weights, - non_monotone_weights.eval()) - sess.run(proj) - self.assertAllClose(expected_projected_monotone_weights, - monotone_weights.eval()) - self.assertAllClose(expected_projected_non_monotone_weights, - non_monotone_weights.eval()) - - def testBooleanIsMonotoneExpectsError(self): - """Test empty non monotonic output.""" - input_dim = 2 - monotonic_output_dim = 2 - non_monotonic_output_dim = 1 - is_monotone = True - input_placeholder = tf.compat.v1.placeholder( - dtype=tf.float32, shape=[None, input_dim]) - with self.assertRaises(ValueError): - _ = monotone_linear_layers.split_monotone_linear_layer( - input_placeholder, - input_dim=input_dim, - monotonic_output_dim=monotonic_output_dim, - non_monotonic_output_dim=non_monotonic_output_dim, - is_monotone=is_monotone, - init_weight_mean=-10.0, - init_weight_stddev=0.0) - - def testZeroNonMonotonicOutputExpectEmptyNonMonotonicOutput(self): - """Test empty non monotonic output.""" - input_dim = 2 - monotonic_output_dim = 2 - non_monotonic_output_dim = 0 - is_monotone = [True, True] - input_placeholder = tf.compat.v1.placeholder( - dtype=tf.float32, shape=[None, input_dim]) - packed_results = monotone_linear_layers.split_monotone_linear_layer( - input_placeholder, - input_dim=input_dim, - monotonic_output_dim=monotonic_output_dim, - non_monotonic_output_dim=non_monotonic_output_dim, - is_monotone=is_monotone, - init_weight_mean=-10.0, - init_weight_stddev=0.0) - (_, _, non_monotonic_outputs, non_monotonic_weights, _, _) = packed_results - self.assertEqual(non_monotonic_outputs, None) - self.assertEqual(non_monotonic_weights, None) - - def testNoNonMonotonicInputsWithNonMonotonicOutputExpectFailure(self): - input_dim = 2 - monotonic_output_dim = 2 - non_monotonic_output_dim = 2 - is_monotone = [True, True] - input_placeholder = tf.compat.v1.placeholder( - dtype=tf.float32, shape=[None, input_dim]) - with self.assertRaises(ValueError): - _ = monotone_linear_layers.split_monotone_linear_layer( - input_placeholder, - input_dim=input_dim, - monotonic_output_dim=monotonic_output_dim, - non_monotonic_output_dim=non_monotonic_output_dim, - is_monotone=is_monotone, - init_weight_mean=-10.0, - init_weight_stddev=0.0) - - def testNoRegularizationExpectsNone(self): - """Create a split monotone linear layer and check no regularization.""" - input_dim = 2 - monotonic_output_dim = 2 - non_monotonic_output_dim = 2 - is_monotone = [True, False] - input_placeholder = tf.compat.v1.placeholder( - dtype=tf.float32, shape=[None, input_dim]) - # We set the initial_weight_mean to -10.0. 
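-    # As in the single-layer case above, omitting l1_reg/l2_reg should yield
-    # regularization=None for the split layer as well.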
-    (_, _, _, _, _,
-     regularization) = monotone_linear_layers.split_monotone_linear_layer(
-         input_placeholder,
-         input_dim=input_dim,
-         monotonic_output_dim=monotonic_output_dim,
-         non_monotonic_output_dim=non_monotonic_output_dim,
-         is_monotone=is_monotone,
-         init_weight_mean=-10.0,
-         init_weight_stddev=0.0)
-    self.assertIsNone(regularization)
-
-  def testRegularization(self):
-    """Create a split monotone linear layer and check regularization."""
-    input_dim = 2
-    monotonic_output_dim = 2
-    non_monotonic_output_dim = 2
-    is_monotone = [True, False]
-    input_placeholder = tf.compat.v1.placeholder(
-        dtype=tf.float32, shape=[None, input_dim])
-    # We set the initial_weight_mean to -10.0.
-    (_, _, _, _, _,
-     regularization) = monotone_linear_layers.split_monotone_linear_layer(
-         input_placeholder,
-         input_dim=input_dim,
-         monotonic_output_dim=monotonic_output_dim,
-         non_monotonic_output_dim=non_monotonic_output_dim,
-         is_monotone=is_monotone,
-         init_weight_mean=-10.0,
-         init_weight_stddev=0.0,
-         l1_reg=0.1,
-         l2_reg=0.1)
-    with self.session() as sess:
-      sess.run(tf.compat.v1.global_variables_initializer())
-      self.assertAlmostEqual(66.0, sess.run(regularization), delta=1e-5)
-
-
-if __name__ == '__main__':
-  tf.test.main()
diff --git a/tensorflow_lattice/python/lib/pwl_calibration_layers.py b/tensorflow_lattice/python/lib/pwl_calibration_layers.py
deleted file mode 100644
index 3d3ce96..0000000
--- a/tensorflow_lattice/python/lib/pwl_calibration_layers.py
+++ /dev/null
@@ -1,622 +0,0 @@
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Piecewise linear calibration layers library for TensorFlow Lattice.
-
-Piecewise linear calibration is a 1D lookup table (LUT). It is part of the
-TensorFlow Lattice set of models and is typically used to calibrate the
-input to lattice models, but it can be used in conjunction with other types
-of models as well.
-
-It also works particularly well with linear models, since it does not break
-independence of the variables (desirable in some situations).
-
-This module provides functions used when building models, as opposed to the
-basic operators exported by pwl_calibration_ops.py.
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import tensorflow as tf
-
-from tensorflow_lattice.python.lib import regularizers
-from tensorflow_lattice.python.lib import tools
-from tensorflow_lattice.python.ops import pwl_calibration_ops
-
-
-def one_dimensional_calibration_layer(uncalibrated_tensor,
-                                      num_keypoints,
-                                      signal_name,
-                                      keypoints_initializers=None,
-                                      keypoints_initializer_fns=None,
-                                      bound=False,
-                                      monotonic=None,
-                                      missing_input_value=None,
-                                      missing_output_value=None,
-                                      **regularizer_amounts):
-  """Creates a calibration layer for a single continuous signal.
-
-  Returns a calibrated tensor of the uncalibrated continuous signal and a list
-  of projection ops.
-
-  Args:
-    uncalibrated_tensor: Tensor of shape [batch_size] of one single signal.
-    num_keypoints: Number of keypoints to use.
-    signal_name: (Required) Used as a suffix to the variable names.
-    keypoints_initializers: For evaluation or inference (or when resuming
-      training from a checkpoint) the values will be loaded from disk, so they
-      don't need to be given -- but in this case num_keypoints needs to be
-      accurate. Two tensors of shape [num_keypoints]. See
-      load_keypoints_from_quantiles or uniform_keypoints_for_signal on how to
-      generate these (module keypoints_initialization).
-    keypoints_initializer_fns: Like keypoints_initializers but using lambda
-      initializers. They should be compatible with tf.compat.v1.get_variable.
-      If this is set, then keypoints_initializers must be None.
-    bound: boolean whether output of calibration must be bounded.
-      Alternatively a dict mapping feature name to boundness.
-    monotonic: whether calibration has to be kept monotonic: None or 0 means no
-      monotonicity. Positive or negative values mean increasing or decreasing
-      monotonicity respectively. Alternatively a dict mapping feature name to
-      monotonic.
-    missing_input_value: If set, and if the input has this value, it is assumed
-      to be missing and the output will either be calibrated to some value
-      between `[calibration_output_min, calibration_output_max]` or set to a
-      fixed value set by missing_output_value. Limitation: it only works for
-      scalars.
-    missing_output_value: Requires missing_input_value also to be set. If set,
-      it will convert missing input to this value.
-    **regularizer_amounts: Keyword args of regularization amounts passed to
-      regularizers.calibrator_regularization(). Keyword names should be among
-      supported regularizers.CALIBRATOR_REGULARIZERS and values should be
-      float.
-
-  Returns:
-    A tuple of:
-    * calibrated tensor of shape [batch_size]
-    * None or projection ops, that must be applied at each
-      step (or every so many steps) to project the model to a feasible space:
-      used for bounding the outputs or for imposing monotonicity.
-    * None or a regularization loss, if regularization is configured.
-
-  Raises:
-    ValueError: if dtypes are incompatible.
-    ValueError: if keypoints_initializers and keypoints_initializer_fns are
-      both set.
-  """
-  if (keypoints_initializers is not None and
-      keypoints_initializer_fns is not None):
-    raise ValueError('keypoints_initializers and keypoints_initializer_fns '
-                     'cannot both be set.')
-  with tf.compat.v1.variable_scope('pwl_calibration'):
-    # Sanity checks.
-    if uncalibrated_tensor.get_shape().ndims != 1:
-      raise ValueError(
-          'one_dimensional_calibration_layer can only be used for a single '
-          'signal, so uncalibrated shape must be of form (batchsize), got %s' %
-          uncalibrated_tensor.get_shape())
-    if missing_output_value is not None and missing_input_value is None:
-      raise ValueError(
-          'missing_output_value can only be set if a missing_input_value is '
-          'also set, missing_input_value=None, missing_output_values=%s' %
-          missing_output_value)
-
-    # Create variables: only use initializers if they are given.
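-    # Three cases are handled below: explicit keypoint tensors
-    # (keypoints_initializers), initializer functions
-    # (keypoints_initializer_fns), and no initializer at all, in which case
-    # the variables are expected to be restored from a checkpoint.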
-    kp_in_name = signal_name + '_keypoints_inputs'
-    kp_out_name = signal_name + '_keypoints_outputs'
-    missing_out_calibrated_name = signal_name + '_calibrated_missing_output'
-
-    if keypoints_initializers is not None:
-      kp_in, kp_out = keypoints_initializers[0], keypoints_initializers[1]
-      if (uncalibrated_tensor.dtype != kp_in.dtype or
-          uncalibrated_tensor.dtype != kp_out.dtype):
-        raise ValueError(
-            'incompatible types for signal \'%s\': uncalibrated=%s, '
-            'keypoints_initializers[input=%s, output=%s]' %
-            (signal_name, uncalibrated_tensor.dtype, kp_in.dtype,
-             kp_out.dtype))
-      tools.assert_shape(kp_in, [num_keypoints],
-                         'keypoints_initializers[input]')
-      tools.assert_shape(kp_out, [num_keypoints],
-                         'keypoints_initializers[output]')
-      keypoints_inputs = tf.compat.v1.get_variable(
-          kp_in_name, initializer=kp_in)
-      keypoints_outputs = tf.compat.v1.get_variable(
-          kp_out_name, initializer=kp_out)
-
-      if missing_input_value is not None:
-        # Value to be taken by missing features.
-        if missing_output_value is not None:
-          missing_out_calibrated = tf.constant(
-              missing_output_value, dtype=uncalibrated_tensor.dtype)
-        else:
-          # Learned missing value, initialized by the first value of kp_out.
-          missing_out_calibrated = tf.compat.v1.get_variable(
-              missing_out_calibrated_name, initializer=kp_out[0])
-    elif keypoints_initializer_fns is not None:
-      kp_in, kp_out = keypoints_initializer_fns[0], keypoints_initializer_fns[1]
-      keypoints_inputs = tf.compat.v1.get_variable(
-          kp_in_name, shape=[num_keypoints], initializer=kp_in)
-      keypoints_outputs = tf.compat.v1.get_variable(
-          kp_out_name, shape=[num_keypoints], initializer=kp_out)
-
-      if missing_input_value is not None:
-        # Value to be taken by missing features.
-        if missing_output_value is not None:
-          missing_out_calibrated = tf.constant(
-              missing_output_value, dtype=uncalibrated_tensor.dtype)
-        else:
-          # Learned missing value, initialized by the first value of kp_out.
-          def first_kp_out(*args, **kwargs):
-            return kp_out(*args, **kwargs)[0]
-
-          missing_out_calibrated = tf.compat.v1.get_variable(
-              missing_out_calibrated_name, shape=[], initializer=first_kp_out)
-    else:
-      # When loading a model, no initializer.
-      keypoints_inputs = tf.compat.v1.get_variable(
-          kp_in_name, shape=[num_keypoints], dtype=uncalibrated_tensor.dtype)
-      keypoints_outputs = tf.compat.v1.get_variable(
-          kp_out_name, shape=[num_keypoints], dtype=uncalibrated_tensor.dtype)
-      if missing_input_value is not None:
-        if missing_output_value is not None:
-          missing_out_calibrated = tf.constant(
-              missing_output_value, dtype=uncalibrated_tensor.dtype)
-        else:
-          missing_out_calibrated = tf.compat.v1.get_variable(
-              missing_out_calibrated_name,
-              shape=[],
-              dtype=uncalibrated_tensor.dtype)
-
-    # Split missing values from normal values.
-    # FutureWork: handling of missing values should be moved to C++ land.
-    if missing_input_value is not None:
-      missing_mask = tf.equal(uncalibrated_tensor,
-                              tf.constant(missing_input_value))
-      mask_indices = tf.range(tf.shape(uncalibrated_tensor)[0])
-      mask_indices = tf.dynamic_partition(mask_indices,
-                                          tf.cast(missing_mask, tf.int32), 2)
-      (uncalibrated_tensor,
-       missing_values) = tf.dynamic_partition(uncalibrated_tensor,
-                                              tf.cast(missing_mask, tf.int32),
-                                              2)
-
-      # Assign value to missing_values.
-      missing_values = tf.ones_like(missing_values)
-      missing_values *= missing_out_calibrated
-
-    # Dense implementation.
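-    # pwl_indexing_calibrator returns a [batch_size, num_keypoints] tensor of
-    # interpolation weights over the keypoint inputs; the weighted sum with
-    # keypoints_outputs below yields the piecewise linear interpolation.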
-    interpolation = pwl_calibration_ops.pwl_indexing_calibrator(
-        uncalibrated_tensor, keypoints_inputs)
-    calibrated = tf.reduce_sum(interpolation * keypoints_outputs, 1)
-    projection_ops = None
-
-    # Re-join missing values.
-    if missing_input_value is not None:
-      calibrated = tf.dynamic_stitch(mask_indices, [calibrated, missing_values])
-
-    # Boundness.
-    projected_keypoints_outputs = None
-    if bound:
-      bound_min_name = signal_name + '_bound_min'
-      bound_max_name = signal_name + '_bound_max'
-      # Set bound_min/max from the initialized min/max values.
-      if keypoints_initializers is not None:
-        # Store bound_min and bound_max in variables because their values (from
-        # kp_out) are only available during train (when keypoints_initializers
-        # is available). During inference the value is not available. Storing
-        # them in variables makes them available during inference.
-        bound_min = tf.compat.v1.get_variable(
-            bound_min_name,
-            dtype=uncalibrated_tensor.dtype,
-            initializer=tf.reduce_min(kp_out))
-        bound_max = tf.compat.v1.get_variable(
-            bound_max_name,
-            dtype=uncalibrated_tensor.dtype,
-            initializer=tf.reduce_max(kp_out))
-      elif keypoints_initializer_fns is not None:
-        # Store bound_min and bound_max in variables because their values (from
-        # kp_out) are only available during train (when keypoints_initializers
-        # is available). During inference the value is not available. Storing
-        # them in variables makes them available during inference.
-        def min_kp_out(*args, **kwargs):
-          return tf.reduce_min(kp_out(*args, **kwargs))
-
-        def max_kp_out(*args, **kwargs):
-          return tf.reduce_max(kp_out(*args, **kwargs))
-
-        bound_min = tf.compat.v1.get_variable(
-            bound_min_name,
-            dtype=uncalibrated_tensor.dtype,
-            shape=[],
-            initializer=min_kp_out)
-        bound_max = tf.compat.v1.get_variable(
-            bound_max_name,
-            dtype=uncalibrated_tensor.dtype,
-            shape=[],
-            initializer=max_kp_out)
-      else:
-        # No need to initialize, since presumably their values will be read
-        # from some checkpoint.
-        bound_min = tf.compat.v1.get_variable(
-            bound_min_name, dtype=uncalibrated_tensor.dtype, shape=[])
-        bound_max = tf.compat.v1.get_variable(
-            bound_max_name, dtype=uncalibrated_tensor.dtype, shape=[])
-      projected_keypoints_outputs = tf.minimum(
-          tf.maximum(keypoints_outputs, bound_min), bound_max)
-
-    # Monotonicity.
-    if monotonic:
-      # First a soft-enforcement: might not break indirect constraints.
-      if projected_keypoints_outputs is None:
-        projected_keypoints_outputs = keypoints_outputs
-      projected_keypoints_outputs = pwl_calibration_ops.monotonic_projection(
-          increasing=bool(monotonic > 0),
-          values=projected_keypoints_outputs,
-          name='project_calibration_to_monotonic')
-
-    # Make an assign_add op to project the output.
-    if projected_keypoints_outputs is not None:
-      constrained_diff = projected_keypoints_outputs - keypoints_outputs
-      projection_ops = tf.compat.v1.assign_add(
-          keypoints_outputs,
-          constrained_diff,
-          use_locking=None,
-          name='project_feasible')
-      if (bound and missing_input_value is not None and
-          missing_output_value is None):
-        # Include op bounding calibrated missing value.
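-        # The learned missing-value output is clipped to
-        # [bound_min, bound_max] using the same assign_add-a-difference
-        # pattern applied to the keypoint outputs above.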
-        projected_missing_out_calibrated = tf.minimum(
-            tf.maximum(missing_out_calibrated, bound_min), bound_max)
-        projected_missing_out_calibrated_diff = (
-            projected_missing_out_calibrated - missing_out_calibrated)
-        projected_missing_out_calibrated_op = tf.compat.v1.assign_add(
-            missing_out_calibrated,
-            projected_missing_out_calibrated_diff,
-            use_locking=None,
-            name='project_missing_calibration_to_bounds')
-        projection_ops = tf.group(projection_ops,
-                                  projected_missing_out_calibrated_op)
-
-    # Regularization
-    regularization = regularizers.calibrator_regularization(
-        keypoints_outputs,
-        name=signal_name + '_calibrator_regularization',
-        **regularizer_amounts)
-  return calibrated, projection_ops, regularization
-
-
-def input_calibration_layer(columns_to_tensors,
-                            num_keypoints,
-                            feature_columns=None,
-                            keypoints_initializers=None,
-                            keypoints_initializer_fns=None,
-                            bound=False,
-                            monotonic=None,
-                            missing_input_values=None,
-                            missing_output_values=None,
-                            dtype=tf.float32,
-                            **regularizer_amounts):
-  """Creates a calibration layer for the given input and feature_columns.
-
-  Returns a tensor with the calibrated values of the given features, a list
-  of the names of the features in the order they appear in the returned
-  tensor, and a list of projection ops, that must be applied at each step (or
-  every so many steps) to project the model to a feasible space: used for
-  bounding the outputs or for imposing monotonicity -- the list will be empty
-  if bound and monotonic are not set.
-
-  Args:
-    columns_to_tensors: A mapping from feature name to tensors. 'string' key
-      means a base feature (not-transformed). If feature_columns is not set
-      these are the features calibrated. Otherwise the transformed
-      feature_columns are the ones calibrated.
-    num_keypoints: Number of keypoints to use. Either a single int, or a dict
-      mapping feature names to num_keypoints. If a value of the dict is 0 or
-      None the corresponding feature won't be calibrated.
-    feature_columns: Optional. If set to a set of FeatureColumns, these will
-      be the features used and calibrated.
-    keypoints_initializers: For evaluation or inference (or when resuming
-      training from a checkpoint) the values will be loaded from disk, so they
-      don't need to be given (leave it as None). Either a tuple of two tensors
-      of shape [num_keypoints], or a dict mapping feature names to pair of
-      tensors of shape [num_keypoints[feature_name]]. See
-      load_keypoints_from_quantiles or uniform_keypoints_for_signal on how to
-      generate these (module keypoints_initialization).
-    keypoints_initializer_fns: Like keypoints_initializers but using lambda
-      initializers. They should be compatible with tf.compat.v1.get_variable.
-      If this is set, then keypoints_initializers must be None.
-    bound: boolean whether output of calibration must be bounded.
-      Alternatively a dict mapping feature name to boundness.
-    monotonic: whether calibration has to be kept monotonic: None or 0 means no
-      monotonicity. Positive or negative values mean increasing or decreasing
-      monotonicity respectively. Alternatively a dict mapping feature name to
-      monotonic.
-    missing_input_values: If set, and if the input has this value, it is
-      assumed to be missing and the output will either be calibrated to some
-      value between `[calibration_output_min, calibration_output_max]` or set
-      to a fixed value set by missing_output_value. Limitation: it only works
-      for scalars. Either one value for all inputs, or a dict mapping feature
-      name to missing_input_value for the respective feature.
-    missing_output_values: Requires missing_input_value also to be set. If
-      set, it will convert missing inputs to this value. Either one value for
-      all inputs, or a dict mapping feature name to missing_output_value for
-      the respective feature.
-    dtype: If any of the scalars are not given as tensors, they are converted
-      to tensors with this dtype.
-    **regularizer_amounts: Keyword args of regularization amounts passed to
-      regularizers.calibrator_regularization(). Keyword names should be among
-      supported regularizers.CALIBRATOR_REGULARIZERS and values should be
-      either float or {feature_name: float}. If float, then same value is
-      applied to all features.
-
-  Returns:
-    A tuple of:
-    * calibrated tensor of shape [batch_size, sum(features dimensions)].
-    * list of the feature names in the order they appear in the calibrated
-      tensor. A name may appear more than once if the feature is
-      multi-dimensional (for instance a multi-dimensional embedding).
-    * list of projection ops, that must be applied at each step (or every so
-      many steps) to project the model to a feasible space: used for bounding
-      the outputs or for imposing monotonicity. Empty if none are requested.
-    * None or tensor with regularization loss.
-
-  Raises:
-    ValueError: if dtypes are incompatible.
-  """
-  with tf.name_scope('input_calibration_layer'):
-    feature_names = tools.get_sorted_feature_names(columns_to_tensors,
-                                                   feature_columns)
-    num_keypoints = tools.cast_to_dict(num_keypoints, feature_names,
-                                       'num_keypoints')
-    bound = tools.cast_to_dict(bound, feature_names, 'bound')
-    monotonic = tools.cast_to_dict(monotonic, feature_names, 'monotonic')
-    keypoints_initializers = tools.cast_to_dict(keypoints_initializers,
-                                                feature_names,
-                                                'keypoints_initializers')
-    keypoints_initializer_fns = tools.cast_to_dict(keypoints_initializer_fns,
-                                                   feature_names,
-                                                   'keypoints_initializer_fns')
-    missing_input_values = tools.cast_to_dict(missing_input_values,
-                                              feature_names,
-                                              'missing_input_values')
-    missing_output_values = tools.cast_to_dict(missing_output_values,
-                                               feature_names,
-                                               'missing_output_values')
-    regularizer_amounts = {
-        name: tools.cast_to_dict(regularizer_amounts[name], feature_names,
-                                 name)
-        for name in regularizer_amounts
-    }
-
-    per_dimension_feature_names = []
-
-    # Get uncalibrated tensors, either from columns_to_tensors, or using
-    # feature_columns.
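-    # E.g. (hypothetical): a numeric_column('x0') in feature_columns is first
-    # materialized through tools.input_from_feature_column below, and the
-    # resulting tensor -- not the raw input -- is what gets calibrated.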
- if feature_columns is None: - uncalibrated_features = [ - columns_to_tensors[name] for name in feature_names - ] - else: - transformed_columns_to_tensors = columns_to_tensors.copy() - dict_feature_columns = {f_col.name: f_col for f_col in feature_columns} - uncalibrated_features = [ - tools.input_from_feature_column(transformed_columns_to_tensors, - dict_feature_columns[name], dtype) - for name in feature_names - ] - - projection_ops = [] - calibrated_splits = [] - total_regularization = None - for feature_idx in range(len(feature_names)): - name = feature_names[feature_idx] - uncalibrated_feature = uncalibrated_features[feature_idx] - if uncalibrated_feature.shape.ndims == 1: - feature_dim = 1 - uncalibrated_splits = [uncalibrated_feature] - elif uncalibrated_feature.shape.ndims == 2: - feature_dim = uncalibrated_feature.shape.dims[1].value - uncalibrated_splits = tf.unstack(uncalibrated_feature, axis=1) - else: - raise ValueError( - 'feature {}: it has rank {}, but only ranks 1 or 2 are ' - 'supported; feature shape={}'.format( - name, uncalibrated_feature.shape.ndims, - uncalibrated_feature.shape)) - missing_input_value = missing_input_values[name] - missing_output_value = missing_output_values[name] - feature_regularizer_amounts = { - regularizer_name: regularizer_amounts[regularizer_name][name] - for regularizer_name in regularizer_amounts - } - - # FutureWork: make the interpolation ops handle multi-dimension values, - # so this step is not needed. - for dim_idx in range(feature_dim): - per_dimension_feature_names += [name] - split_name = name - if feature_dim > 1: - split_name = '{}_dim_{}'.format(name, dim_idx) - uncalibrated = uncalibrated_splits[dim_idx] - if not num_keypoints[name]: - # No calibration for this feature: - calibrated_splits += [uncalibrated] - if (missing_input_value is not None or - missing_output_value is not None): - raise ValueError( - 'feature %s: cannot handle missing values if feature is not ' - 'calibrated, missing_input_value=%s, missing_output_value=%s' % - (name, missing_input_value, missing_output_value)) - else: - calibrated, projection, reg = one_dimensional_calibration_layer( - uncalibrated, - num_keypoints[name], - signal_name=split_name, - keypoints_initializers=keypoints_initializers[name], - keypoints_initializer_fns=keypoints_initializer_fns[name], - bound=bound[name], - monotonic=monotonic[name], - missing_input_value=missing_input_value, - missing_output_value=missing_output_value, - **feature_regularizer_amounts) - calibrated_splits += [calibrated] - if projection is not None: - projection_ops += [projection] - total_regularization = tools.add_if_not_none(total_regularization, - reg) - - all_calibrated = tf.stack( - calibrated_splits, axis=1, name='stack_calibrated') - return (all_calibrated, per_dimension_feature_names, projection_ops, - total_regularization) - - -def calibration_layer(uncalibrated_tensor, - num_keypoints, - keypoints_initializers=None, - keypoints_initializer_fns=None, - bound=False, - monotonic=None, - missing_input_values=None, - missing_output_values=None, - name=None, - **regularizer_amounts): - """Creates a calibration layer for uncalibrated values. - - Returns a calibrated tensor of the same shape as the uncalibrated continuous - signals passed in, and a list of projection ops, that must be applied at - each step (or every so many steps) to project the model to a feasible space: - used for bounding the outputs or for imposing monotonicity -- the list will be - empty if bound and monotonic are not set. 
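-
-  Example (illustrative sketch; assumes a rank-2 `uncalibrated` tensor and
-  uniform keypoints built via keypoints_initialization, as in the unit
-  tests):
-
-    kp_init = keypoints_initialization.uniform_keypoints_for_signal(
-        num_keypoints=10,
-        input_min=tf.constant(0.0, dtype=tf.float32),
-        input_max=tf.constant(1.0, dtype=tf.float32),
-        output_min=0.0,
-        output_max=1.0,
-        dtype=tf.float32)
-    calibrated, projection_ops, reg = calibration_layer(
-        uncalibrated, num_keypoints=10, keypoints_initializers=kp_init)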
-
-  Args:
-    uncalibrated_tensor: Tensor of shape [batch_size, ...] with uncalibrated
-      values.
-    num_keypoints: Number of keypoints to use. Either a scalar value that will
-      be used for every uncalibrated signal, or a list of n values, one per
-      uncalibrated signal -- uncalibrated is first flattened (see
-      tf.contrib.layers.flatten) to [batch_size, n], and there should be one
-      value in the list for each of the n signals. If a value of the list is 0
-      or None the corresponding signal won't be calibrated.
-    keypoints_initializers: For evaluation or inference (or when resuming
-      training from a checkpoint) the values will be loaded from disk, so they
-      don't need to be given (leave it as None). Otherwise provide either a
-      tuple of two tensors of shape [num_keypoints], or a list of n pairs of
-      tensors, each of shape [num_keypoints]. In this list there should be one
-      pair per uncalibrated signal, just like num_keypoints above. Notice that
-      num_keypoints can be different per signal.
-    keypoints_initializer_fns: Like keypoints_initializers but using lambda
-      initializers. They should be compatible with tf.compat.v1.get_variable.
-      If this is set, then keypoints_initializers must be None.
-    bound: boolean whether output of calibration must be bound. Alternatively a
-      list of n booleans, one per uncalibrated value, like num_keypoints above.
-    monotonic: whether calibration is monotonic: None or 0 means no
-      monotonicity. Positive or negative values mean increasing or decreasing
-      monotonicity respectively. Alternatively a list of n monotonic values,
-      one per uncalibrated value, like num_keypoints above.
-    missing_input_values: If set, and if the input has this value it is assumed
-      to be missing and the output will either be calibrated to some value
-      between `[calibration_output_min, calibration_output_max]` or set to a
-      fixed value set by missing_output_value. Limitation: it only works for
-      scalars. Either one value for all inputs, or a list with one value per
-      uncalibrated value.
-    missing_output_values: Requires missing_input_value also to be set. If
-      set, it will convert missing inputs to this value. Either one value for
-      all outputs, or a list with one value per uncalibrated value.
-    name: Name scope for operations.
-    **regularizer_amounts: Keyword args of regularization amounts passed to
-      regularizers.calibrator_regularization(). Keyword names should be among
-      supported regularizers.CALIBRATOR_REGULARIZERS and values should be
-      either float or list of floats. If float, then same value is applied to
-      all input signals.
-
-  Returns:
-    A tuple of:
-    * calibrated tensor of shape [batch_size, ...], the same shape as
-      uncalibrated.
-    * list of projection ops, that must be applied at each step (or every so
-      many steps) to project the model to a feasible space: used for bounding
-      the outputs or for imposing monotonicity. Empty if none are requested.
-    * None or tensor with regularization loss.
-
-  Raises:
-    ValueError: If dimensions don't match.
-  """
-  with tf.name_scope(name or 'calibration_layer'):
-    # Flattening uncalibrated tensor [batch_size, k1, k2, ..., kn] to
-    # [batch_size, k1 * k2 * ... * kn].
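-    # E.g. a [batch_size, 3, 2] input flattens to [batch_size, 6], so n == 6
-    # and a separate one-dimensional calibrator is built for each of the six
-    # flattened signals.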
- uncalibrated_shape = uncalibrated_tensor.get_shape().as_list() - n = 1 - for non_batch_dim in uncalibrated_shape[1:]: - n *= non_batch_dim - flat_uncalibrated = tf.reshape( - uncalibrated_tensor, shape=[-1, n], name='flat_uncalibrated') - - num_keypoints = tools.cast_to_list(num_keypoints, n, 'num_keypoints') - keypoints_initializers = tools.cast_to_list(keypoints_initializers, n, - 'keypoints_initializers') - keypoints_initializer_fns = tools.cast_to_list(keypoints_initializer_fns, n, - 'keypoints_initializer_fns') - bound = tools.cast_to_list(bound, n, 'bound') - monotonic = tools.cast_to_list(monotonic, n, 'monotonic') - missing_input_values = tools.cast_to_list(missing_input_values, n, - 'missing_input_values') - missing_output_values = tools.cast_to_list(missing_output_values, n, - 'missing_output_values') - regularizer_amounts = { - name: tools.cast_to_list(regularizer_amounts[name], n, name) - for name in regularizer_amounts - } - - signal_names = ['signal_%d' % ii for ii in range(n)] - - uncalibrated_splits = tf.unstack(flat_uncalibrated, axis=1) - calibrated_splits = [] - projection_ops = [] - total_regularization = None - for ii in range(n): - if not num_keypoints[ii]: - # No calibration for this signal. - calibrated_splits += [uncalibrated_splits[ii]] - else: - signal_regularizer_amounts = { - regularizer_name: regularizer_amounts[regularizer_name][ii] - for regularizer_name in regularizer_amounts - } - calibrated, projection, reg = one_dimensional_calibration_layer( - uncalibrated_splits[ii], - num_keypoints[ii], - signal_name=signal_names[ii], - keypoints_initializers=keypoints_initializers[ii], - keypoints_initializer_fns=keypoints_initializer_fns[ii], - bound=bound[ii], - monotonic=monotonic[ii], - missing_input_value=missing_input_values[ii], - missing_output_value=missing_output_values[ii], - **signal_regularizer_amounts) - calibrated_splits += [calibrated] - if projection is not None: - projection_ops += [projection] - total_regularization = tools.add_if_not_none(total_regularization, reg) - flat_calibrated = tf.stack( - calibrated_splits, axis=1, name='stack_calibrated') - reshaped_calibrated = tf.reshape( - flat_calibrated, - shape=tf.shape(uncalibrated_tensor), - name='reshape_calibrated') - return reshaped_calibrated, projection_ops, total_regularization diff --git a/tensorflow_lattice/python/lib/pwl_calibration_layers_test.py b/tensorflow_lattice/python/lib/pwl_calibration_layers_test.py deleted file mode 100644 index 807b8a3..0000000 --- a/tensorflow_lattice/python/lib/pwl_calibration_layers_test.py +++ /dev/null @@ -1,586 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for TensorFlow Lattice's pwl_calibration_layers module.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -from tensorflow_lattice.python.lib import keypoints_initialization -from tensorflow_lattice.python.lib import pwl_calibration_layers -from tensorflow_lattice.python.lib import tools - -_DEFAULT_OUTPUT_MIN = 200 -_DEFAULT_OUTPUT_MAX = 300 - - -def _get_variable_by_name(name): - return tf.compat.v1.get_default_graph().get_tensor_by_name(name) - - -class PwlCalibratorLayersTestCase(tf.test.TestCase): - - def _BuildInputs(self, x0, x1): - """Returns input_fn, feature_names and feature_columns.""" - - def _input_fn(): - return { - 'x0': tf.constant(x0, dtype=tf.float32), - 'x1': tf.constant(x1, dtype=tf.float32), - } - - feature_names = ['x0', 'x1'] - x0_dim = 1 if not isinstance(x0[0], list) else len(x0[0]) - x1_dim = 1 if not isinstance(x1[0], list) else len(x1[0]) - feature_columns = { - tf.feature_column.numeric_column(key='x0', shape=(x0_dim,)), - tf.feature_column.numeric_column(key='x1', shape=(x1_dim,)), - } - return _input_fn, feature_names, feature_columns - - def _CheckOneDimensionalCalibrationLayer(self, sess, uncalibrated, calibrated, - value, want): - got = sess.run(calibrated, feed_dict={uncalibrated: value}) - self.assertAllClose(got, want) - - def _UniformKeypoints(self, - num_keypoints, - output_min=_DEFAULT_OUTPUT_MIN, - output_max=_DEFAULT_OUTPUT_MAX): - return keypoints_initialization.uniform_keypoints_for_signal( - num_keypoints=num_keypoints, - input_min=tf.constant(0.0, dtype=tf.float32), - input_max=tf.constant(1.0, dtype=tf.float32), - output_min=output_min, - output_max=output_max, - dtype=tf.float32) - - def testOneDimensionalCalibrationLayer(self): - with tf.Graph().as_default(): - num_keypoints = 10 - keypoints_init = self._UniformKeypoints(num_keypoints) - uncalibrated = tf.compat.v1.placeholder( - shape=tf.TensorShape([tf.compat.v1.Dimension(None)]), - dtype=tf.float32) - calibrated, projection, regularization = ( - pwl_calibration_layers.one_dimensional_calibration_layer( - uncalibrated, - num_keypoints=num_keypoints, - signal_name='test_one_dimensional_calibration_layer', - keypoints_initializers=keypoints_init)) - self.assertEqual(projection, None) - self.assertEqual(regularization, None) - with self.session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - self._CheckOneDimensionalCalibrationLayer(sess, uncalibrated, - calibrated, [0.5], [250.]) - self._CheckOneDimensionalCalibrationLayer(sess, uncalibrated, - calibrated, [0.2, 0.7], - [220., 270.]) - - def testOneDimensionalCalibrationLambda(self): - with tf.Graph().as_default(): - num_keypoints = 10 - - def kp_in_fn(*args, **kwargs): - del args - del kwargs - return tf.linspace(0., 1., num_keypoints) - - def kp_out_fn(*args, **kwargs): - del args - del kwargs - return tf.linspace( - float(_DEFAULT_OUTPUT_MIN), float(_DEFAULT_OUTPUT_MAX), - num_keypoints) - - uncalibrated = tf.compat.v1.placeholder( - shape=tf.TensorShape([tf.compat.v1.Dimension(None)]), - dtype=tf.float32) - calibrated, _, regularization = ( - pwl_calibration_layers.one_dimensional_calibration_layer( - uncalibrated, - missing_input_value=0.21, - num_keypoints=num_keypoints, - bound=True, - signal_name='test_one_dimensional_calibration_layer_lambda', - keypoints_initializer_fns=(kp_in_fn, kp_out_fn))) - 
self.assertEqual(regularization, None) - with self.session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - self._CheckOneDimensionalCalibrationLayer(sess, uncalibrated, - calibrated, [0.5], [250.]) - self._CheckOneDimensionalCalibrationLayer(sess, uncalibrated, - calibrated, [0.2, 0.7], - [220., 270.]) - - def testOneDimensionalCalibrationRaises(self): - with tf.Graph().as_default(): - num_keypoints = 10 - - def kp_in_fn(*args, **kwargs): - del args - del kwargs - return tf.linspace(0., 1., num_keypoints) - - def kp_out_fn(*args, **kwargs): - del args - del kwargs - return tf.linspace( - float(_DEFAULT_OUTPUT_MIN), float(_DEFAULT_OUTPUT_MAX), - num_keypoints) - - keypoints_init = self._UniformKeypoints(num_keypoints) - uncalibrated = tf.compat.v1.placeholder( - shape=tf.TensorShape([tf.compat.v1.Dimension(None)]), - dtype=tf.float32) - self.assertRaises( - ValueError, - pwl_calibration_layers.one_dimensional_calibration_layer, - uncalibrated, - num_keypoints=num_keypoints, - signal_name='test_one_dimensional_calibration_layer', - keypoints_initializers=keypoints_init, - keypoints_initializer_fns=(kp_in_fn, kp_out_fn)) - - def testOneDimensionalCalibrationLayerRegularization(self): - with tf.Graph().as_default(): - num_keypoints = 10 - keypoints_init = self._UniformKeypoints(num_keypoints) - uncalibrated = tf.compat.v1.placeholder( - shape=tf.TensorShape([tf.compat.v1.Dimension(None)]), - dtype=tf.float32) - _, _, regularization = ( - pwl_calibration_layers.one_dimensional_calibration_layer( - uncalibrated, - num_keypoints=num_keypoints, - signal_name='test_one_dimensional_calibration_layer', - l1_reg=1.0, - l2_reg=1.0, - l1_laplacian_reg=1.0, - l2_laplacian_reg=1.0, - keypoints_initializers=keypoints_init)) - with self.session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - got = sess.run(regularization) - expected_value = 638896.25 - self.assertAlmostEqual(got, expected_value, delta=1e-1) - - def testInputCalibrationLayer(self): - x0 = [[0.1], [0.2], [0.3], [0.3], [-1.]] - x1 = [[0.9], [0.8], [0.7], [-1.], [0.7]] - input_fn, feature_names, feature_columns = self._BuildInputs(x0, x1) - num_keypoints = 10 - - # Test calibration of two features. - with tf.Graph().as_default(): - keypoints_init = self._UniformKeypoints(num_keypoints) - columns_to_tensors = input_fn() - calibrated, feature_names, projection_ops, regularization = ( - pwl_calibration_layers.input_calibration_layer( - columns_to_tensors=columns_to_tensors, - feature_columns=feature_columns, - num_keypoints=num_keypoints, - keypoints_initializers=keypoints_init, - missing_input_values=-1., - missing_output_values=7.)) - self.assertEqual(feature_names, ['x0', 'x1']) - self.assertEqual(projection_ops, []) - self.assertEqual(regularization, None) - got = keypoints_initialization._materialize_locally( - calibrated, num_steps=1) - self.assertAllClose( - got, - [[210., 290.], [220., 280.], [230., 270.], [230., 7.], [7., 270.]]) - - def testInputCalibrationLayerNonCalibrated(self): - x0 = [[0.1], [0.2], [0.3], [0.3], [-1.]] - x1 = [[0.9], [0.8], [0.7], [-1.], [0.7]] - input_fn, feature_names, feature_columns = self._BuildInputs(x0, x1) - num_keypoints = 10 - - # Test case where one feature is not calibrated. 
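-    # ('x1' is given num_keypoints=0 below, so it passes through uncalibrated
-    # and its raw values appear unchanged in the output.)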
- with tf.Graph().as_default(): - keypoints_init = self._UniformKeypoints(num_keypoints) - columns_to_tensors = input_fn() - - calibrated, feature_names, projection_ops, regularization = ( - pwl_calibration_layers.input_calibration_layer( - columns_to_tensors=columns_to_tensors, - feature_columns=feature_columns, - num_keypoints={ - 'x0': num_keypoints, - 'x1': 0 - }, - keypoints_initializers=keypoints_init, - missing_input_values={ - 'x0': -1., - tools.DEFAULT_NAME: None - }, - missing_output_values={ - 'x0': 7., - tools.DEFAULT_NAME: None - })) - self.assertEqual(projection_ops, []) - self.assertEqual(feature_names, ['x0', 'x1']) - self.assertEqual(regularization, None) - got = keypoints_initialization._materialize_locally( - calibrated, num_steps=1) - self.assertAllClose( - got, [[210., 0.9], [220., 0.8], [230., 0.7], [230., -1.], [7., 0.7]]) - - def testInputCalibrationLayerMultiDimensional(self): - x0 = [[0.1, 0.9], [0.2, 0.8], [0.3, 0.7]] - x1 = [[0.9, 1.2], [0.8, 1.1], [0.7, 0.2]] - input_fn, feature_names, feature_columns = self._BuildInputs(x0, x1) - num_keypoints = 10 - - # Test case where feature columns are multi-dimensional. - with tf.Graph().as_default(): - keypoints_init = self._UniformKeypoints(num_keypoints) - columns_to_tensors = input_fn() - calibrated, feature_names, projection_ops, regularization = ( - pwl_calibration_layers.input_calibration_layer( - columns_to_tensors=columns_to_tensors, - feature_columns=feature_columns, - num_keypoints={ - 'x0': num_keypoints, - 'x1': 0 - }, - keypoints_initializers=keypoints_init)) - self.assertEqual(projection_ops, []) - self.assertEqual(feature_names, ['x0', 'x0', 'x1', 'x1']) - self.assertEqual(regularization, None) - got = keypoints_initialization._materialize_locally( - calibrated, num_steps=1) - self.assertAllClose(got, [[210., 290., 0.9, 1.2], [220., 280., 0.8, 1.1], - [230., 270., 0.7, 0.2]]) - - def testInputCalibrationLayerRegularization(self): - x0 = [0.1, 0.2, 0.7] - x1 = [0.9, 0.8, 0.7] - input_fn, _, feature_columns = self._BuildInputs(x0, x1) - num_keypoints = 10 - - with tf.Graph().as_default(): - keypoints_init = self._UniformKeypoints(num_keypoints) - columns_to_tensors = input_fn() - _, _, _, regularization = ( - pwl_calibration_layers.input_calibration_layer( - columns_to_tensors=columns_to_tensors, - feature_columns=feature_columns, - num_keypoints={ - 'x0': num_keypoints, - 'x1': num_keypoints - }, - l1_reg={ - 'x0': 1.0, - 'x1': 2.0 - }, - l2_reg={ - 'x0': 0.5, - 'x1': None - }, - l1_laplacian_reg={ - 'x0': None, - 'x1': 3.0 - }, - l2_laplacian_reg={ - 'x0': None, - 'x1': 5.0 - }, - keypoints_initializers=keypoints_init)) - with self.session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - got = sess.run(regularization) - expected_value = 330948.12 - self.assertAlmostEqual(got, expected_value, delta=1e-1) - - def testCalibrationLayer(self): - with tf.Graph().as_default(): - # Shape: [batch_size=2, 3, 2] - uncalibrated = tf.constant([ - [[0.1, 0.2], [0.9, 0.8], [0.4, 0.6]], - [[0.2, 0.3], [1.0, 0.9], [0.5, 0.7]], - ]) - kp_init_0 = self._UniformKeypoints(10) - kp_init_1 = self._UniformKeypoints(5, 0, 1000) - num_keypoints = [10, 10, 10, 10, 5, 5] - kp_init = [ - kp_init_0, kp_init_0, kp_init_0, kp_init_0, kp_init_1, kp_init_1 - ] - calibrated, projection_ops, regularization = ( - pwl_calibration_layers.calibration_layer( - uncalibrated, - num_keypoints, - keypoints_initializers=kp_init, - name='test')) - self.assertEqual(projection_ops, []) - self.assertEqual(regularization, None) - got = 
keypoints_initialization._materialize_locally( - calibrated, num_steps=1) - want = [ - [[210., 220.], [290., 280.], [400., 600.]], - [[220., 230.], [300., 290.], [500., 700.]], - ] - self.assertAllClose(got, want) - - def testCalibrationLayerRegularization(self): - with tf.Graph().as_default(): - # Shape: [batch_size=2, 3, 2] - uncalibrated = tf.constant([ - [[0.1, 0.2], [0.9, 0.8], [0.4, 0.6]], - [[0.2, 0.3], [1.0, 0.9], [0.5, 0.7]], - ]) - kp_init_0 = self._UniformKeypoints(10) - kp_init_1 = self._UniformKeypoints(5, 0, 1000) - num_keypoints = [10, 10, 10, 10, 5, 5] - kp_init = [ - kp_init_0, kp_init_0, kp_init_0, kp_init_0, kp_init_1, kp_init_1 - ] - _, _, regularization = ( - pwl_calibration_layers.calibration_layer( - uncalibrated, - num_keypoints, - keypoints_initializers=kp_init, - l1_reg=0.1, - l2_reg=1.0, - l1_laplacian_reg=[0.3, 0.1, 0.2, 0.3, 0.4, 0.5], - l2_laplacian_reg=[None, 1.0, None, None, None, None], - name='test')) - - with self.session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - got = sess.run(regularization) - expected_value = 6294341.5 - self.assertAlmostEqual(got, expected_value, delta=1e-1) - - def testCalibrationLayerWithUnknownBatchSize(self): - with tf.Graph().as_default(): - # Shape: [batch_size=2, 3, 2] - uncalibrated = tf.compat.v1.placeholder(tf.float32, shape=[None, 3, 2]) - kp_init_0 = self._UniformKeypoints(10) - kp_init_1 = self._UniformKeypoints(5, 0, 1000) - num_keypoints = [10, 10, 10, 10, 5, 5] - kp_init = [ - kp_init_0, kp_init_0, kp_init_0, kp_init_0, kp_init_1, kp_init_1 - ] - calibrated, projection_ops, regularization = ( - pwl_calibration_layers.calibration_layer( - uncalibrated, - num_keypoints, - keypoints_initializers=kp_init, - name='test')) - self.assertEqual(projection_ops, []) - self.assertEqual(regularization, None) - got = keypoints_initialization._materialize_locally( - calibrated, - num_steps=1, - feed_dict={ - uncalibrated: [ - [[0.1, 0.2], [0.9, 0.8], [0.4, 0.6]], - [[0.2, 0.3], [1.0, 0.9], [0.5, 0.7]], - ] - }) - want = [ - [[210., 220.], [290., 280.], [400., 600.]], - [[220., 230.], [300., 290.], [500., 700.]], - ] - self.assertAllClose(got, want) - - def testBoundness(self): - # Create a bound calibration, then set it outside the bounds and check - # that it is projected back to the bounds. - with tf.Graph().as_default(): - num_keypoints = 3 - keypoints_init = keypoints_initialization.uniform_keypoints_for_signal( - num_keypoints=num_keypoints, - input_min=tf.constant(0.0, dtype=tf.float32), - input_max=tf.constant(1.0, dtype=tf.float32), - output_min=0., - output_max=1., - dtype=tf.float32) - uncalibrated = tf.compat.v1.placeholder( - shape=tf.TensorShape([tf.compat.v1.Dimension(None)]), - dtype=tf.float32) - with tf.compat.v1.variable_scope('test_boundness'): - _, projection, regularization = ( - pwl_calibration_layers.one_dimensional_calibration_layer( - uncalibrated, - num_keypoints=num_keypoints, - bound=True, - signal_name='bounded_x', - keypoints_initializers=keypoints_init)) - self.assertIsNotNone(projection) - self.assertEqual(regularization, None) - - with self.session() as sess: - # First initialize keypoints (and all variables) - sess.run(tf.compat.v1.global_variables_initializer()) - kp_out = _get_variable_by_name( - 'test_boundness/pwl_calibration/bounded_x_keypoints_outputs:0') - kp_out_values = sess.run(kp_out) - self.assertAllClose(kp_out_values, [0.0, 0.5, 1.0]) - - # Assign values to variable beyond bounds. 
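-        # (The projection below should clamp each keypoint output back into
-        # [0, 1]: -0.1 -> 0.0 and 1.2 -> 1.0, while the in-range 0.9 stays.)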
- out_of_bounds = [-0.1, 1.2, 0.9] - sess.run( - tf.compat.v1.assign(kp_out, - tf.constant(out_of_bounds, dtype=tf.float32))) - kp_out_values = sess.run(kp_out) - self.assertAllClose(kp_out_values, out_of_bounds) - - # Execute projection. - sess.run(projection) - kp_out_values = sess.run(kp_out) - self.assertAllClose(kp_out_values, [0.0, 1.0, 0.9]) - - def testMonotonicity(self): - # Create a monotonic calibration, then set it in a non-monotonic way and - # check that it is projected back to monotonicity. - with tf.Graph().as_default(): - num_keypoints = 5 - keypoints_init = keypoints_initialization.uniform_keypoints_for_signal( - num_keypoints=num_keypoints, - input_min=tf.constant(0.0, dtype=tf.float32), - input_max=tf.constant(1.0, dtype=tf.float32), - output_min=0., - output_max=1., - dtype=tf.float32) - uncalibrated = tf.compat.v1.placeholder( - shape=tf.TensorShape([tf.compat.v1.Dimension(None)]), - dtype=tf.float32) - with tf.compat.v1.variable_scope('test_monotonicity'): - _, projection, regularization = ( - pwl_calibration_layers.one_dimensional_calibration_layer( - uncalibrated, - num_keypoints=num_keypoints, - monotonic=1, - signal_name='monotonic_x', - keypoints_initializers=keypoints_init)) - self.assertIsNotNone(projection) - self.assertEqual(regularization, None) - - with self.session() as sess: - # First initialize keypoints (and all variables) - sess.run(tf.compat.v1.global_variables_initializer()) - kp_out = _get_variable_by_name( - 'test_monotonicity/pwl_calibration/monotonic_x_keypoints_outputs:0') - kp_out_values = sess.run(kp_out) - self.assertAllClose(kp_out_values, [0.0, 0.25, 0.5, 0.75, 1.0]) - - # Assign non_monotonic calibration. - non_monotonic = [4., 5., 0., 4., -3.] - sess.run( - tf.compat.v1.assign(kp_out, - tf.constant(non_monotonic, dtype=tf.float32))) - kp_out_values = sess.run(kp_out) - self.assertAllClose(kp_out_values, non_monotonic) - - # Execute projection. - sess.run(projection) - kp_out_values = sess.run(kp_out) - self.assertAllClose(kp_out_values, [2., 2., 2., 2., 2.]) - - def testMissingFixedOutput(self): - with tf.Graph().as_default(): - num_keypoints = 10 - keypoints_init = self._UniformKeypoints(num_keypoints) - uncalibrated = tf.compat.v1.placeholder( - shape=tf.TensorShape([tf.compat.v1.Dimension(None)]), - dtype=tf.float32) - calibrated, projection, regularization = ( - pwl_calibration_layers.one_dimensional_calibration_layer( - uncalibrated, - num_keypoints=num_keypoints, - signal_name='test_missing_fixed_output', - keypoints_initializers=keypoints_init, - bound=True, - missing_input_value=-1., - missing_output_value=7.)) - self.assertNotEqual(projection, None) - self.assertEqual(regularization, None) - with self.session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - # Mix of missing and calibrated: - self._CheckOneDimensionalCalibrationLayer(sess, uncalibrated, - calibrated, [0.5, -1.], - [250., 7.]) - # Only calibrated: - self._CheckOneDimensionalCalibrationLayer(sess, uncalibrated, - calibrated, [0.2, 0.7], - [220., 270.]) - # Only missing: - self._CheckOneDimensionalCalibrationLayer(sess, uncalibrated, - calibrated, [-1., -1.], - [7., 7.]) - - # Projection shouldn't affect the missing output value, even though - # it is outside the bounds. 
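-        # (missing_output_value=7. is a fixed constant rather than a bounded
-        # variable; the bounding assign_add is only created when no fixed
-        # missing output is given.)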
- sess.run([projection]) - self._CheckOneDimensionalCalibrationLayer(sess, uncalibrated, - calibrated, [-1., -1.], - [7., 7.]) - - def testMissingCalibratedOutput(self): - with tf.Graph().as_default(): - # With calibration: - num_keypoints = 10 - keypoints_init = self._UniformKeypoints(num_keypoints) - uncalibrated = tf.compat.v1.placeholder( - shape=tf.TensorShape([tf.compat.v1.Dimension(None)]), - dtype=tf.float32) - calibrated, projection, regularization = ( - pwl_calibration_layers.one_dimensional_calibration_layer( - uncalibrated, - num_keypoints=num_keypoints, - signal_name='test_missing_calibrated_output', - keypoints_initializers=keypoints_init, - bound=True, - missing_input_value=-1.)) - self.assertNotEqual(projection, None) - self.assertEqual(regularization, None) - with self.session() as sess: - sess.run(tf.compat.v1.global_variables_initializer()) - self._CheckOneDimensionalCalibrationLayer(sess, uncalibrated, - calibrated, [0.5, -1.], - [250., _DEFAULT_OUTPUT_MIN]) - - # Set out-of-bound value for missing value. - missing_calibrated_output = _get_variable_by_name( - 'pwl_calibration/' - 'test_missing_calibrated_output_calibrated_missing_output:0') - sess.run([tf.compat.v1.assign(missing_calibrated_output, 700.0)]) - self._CheckOneDimensionalCalibrationLayer(sess, uncalibrated, - calibrated, [-1.], [700.]) - - # Project to bound. - sess.run(projection) - self._CheckOneDimensionalCalibrationLayer(sess, uncalibrated, - calibrated, [-1.], - [_DEFAULT_OUTPUT_MAX]) - - # Gradient wrt missing_calibrated_output should be 1.0 - d_calibrated_wrt_d_output = tf.gradients(calibrated, - missing_calibrated_output) - got = sess.run( - d_calibrated_wrt_d_output, feed_dict={uncalibrated: [-1.]}) - self.assertAllClose(got, [1.]) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_lattice/python/lib/regularizers.py b/tensorflow_lattice/python/lib/regularizers.py deleted file mode 100644 index 65337a8..0000000 --- a/tensorflow_lattice/python/lib/regularizers.py +++ /dev/null @@ -1,598 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""A collection of TensorFlow Lattice regularizers.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import copy - -import tensorflow as tf - -from tensorflow_lattice.python.lib import tools - - -def _calibrator_laplacian(output_keypoints, - l1_reg=None, - l2_reg=None, - name='calibrator_laplacian'): - """Returns a calibrator laplacian regularization. - - A calibrator laplacian regularization = - l1_reg * ||output_keypoints[1:end] - output_keypoints[0:end-1]||_1 - + l2_reg * ||output_keypoints[1:end] - output_keypoints[0:end-1]||_2^2 - - Args: - output_keypoints: (Rank-1 tensor with shape [num_keypoints]) 1d calibrator's - output keypoints tensor. - l1_reg: (float) l1 regularization amount. - l2_reg: (float) l2 regularization amount. 
- name: name scope of calibrator laplacian regularizer. - - Returns: - A rank-0 tensor (scalar) that contains regularizer - or None if there is no regularization. This can happen if l1_reg and l2_reg - amounts are not set, or num_keypoints <= 1. - - Raises: - ValueError: * If output_keypoints is not rank-1 tensor. - * If the shape of output_keypoints is unknown. - """ - dims = output_keypoints.shape.as_list() - if len(dims) != 1: - raise ValueError('calibrator_laplacian expects output_keypoints as a ' - 'rank-1 tensor but got shape: %s' % dims) - num_kpts = dims[0] - if num_kpts is None: - raise ValueError('calibrator_laplacian expects output_keypoints dimension ' - 'to be known, but the first dimension is not set.') - - if num_kpts <= 1 or (l1_reg is None and l2_reg is None): - return None - - reg = None - with tf.name_scope(name): - diff = ( - tf.slice(output_keypoints, [1], [num_kpts - 1]) - tf.slice( - output_keypoints, [0], [num_kpts - 1])) - if l1_reg: - reg = tools.add_if_not_none(reg, l1_reg * tf.reduce_sum(tf.abs(diff))) - if l2_reg: - reg = tools.add_if_not_none(reg, l2_reg * tf.reduce_sum(tf.square(diff))) - - return reg - - -def _calibrator_hessian(output_keypoints, - l1_reg=None, - l2_reg=None, - name='calibrator_hessian'): - """Returns a calibrator hessian regularization. - - A calibrator hessian regularization (change in slope) = - l1_reg * ||nonlinearity||_1 + l2_reg * ||nonlinearity||_2^2 - where nonlinearity is: - 2 * output_keypoints[1:end-1] - - output_keypoints[0:end-2] - - output_keypoints[2:end]. - This regularizer is zero when the output_keypoints form a linear function of - the index (and not necessarily linear in input values, e.g. when using - non-uniform input keypoints). - - Args: - output_keypoints: (Rank-1 tensor with shape [num_keypoints]) 1d calibrator's - output keypoints tensor. - l1_reg: (float) l1 regularization amount. - l2_reg: (float) l2 regularization amount. - name: name scope of calibrator hessian regularizer. - - Returns: - A rank-0 tensor (scalar) that contains regularizer or None if there is no - regularization. This can happen if l1_reg and l2_reg amounts are not set, or - num_keypoints <= 2. - - Raises: - ValueError: * If output_keypoints is not rank-1 tensor. - * If the shape of output_keypoints is unknown. - """ - dims = output_keypoints.shape.as_list() - if len(dims) != 1: - raise ValueError('calibrator_hessian expects output_keypoints as a ' - 'rank-1 tensor but got shape: %s' % dims) - num_kpts = dims[0] - if num_kpts is None: - raise ValueError('calibrator_hessian expects output_keypoints dimension ' - 'to be known, but the first dimension is not set.') - - if num_kpts < 3 or (l1_reg is None and l2_reg is None): - return None - - reg = None - with tf.name_scope(name): - slope_diff = (2 * tf.slice(output_keypoints, [1], [num_kpts - 2]) - - tf.slice(output_keypoints, [0], [num_kpts - 2]) - tf.slice( - output_keypoints, [2], [num_kpts - 2])) - if l1_reg: - reg = tools.add_if_not_none(reg, - l1_reg * tf.reduce_sum(tf.abs(slope_diff))) - if l2_reg: - reg = tools.add_if_not_none(reg, - l2_reg * tf.reduce_sum(tf.square(slope_diff))) - - return reg - - -def _calibrator_wrinkle(output_keypoints, - l1_reg=None, - l2_reg=None, - name='calibrator_wrinkle'): - """Returns a calibrator wrinkle regularization. 
- - A calibrator wrinkle regularization (change in second derivative) = - l1_reg * ||third_derivative||_1 + l2_reg * ||third_derivative||_2^2 - where third_derivative is: - +3 * output_keypoints[1:end-2] - -3 * output_keypoints[2:end-1] - - output_keypoints[0:end-3] - + output_keypoints[3:end]. - This regularizer is zero when the output_keypoints form a 2nd order polynomial - of the index (and not necessarily in input values, e.g. when using - non-uniform input keypoints). - - Args: - output_keypoints: (Rank-1 tensor with shape [num_keypoints]) 1d calibrator's - output keypoints tensor. - l1_reg: (float) l1 regularization amount. - l2_reg: (float) l2 regularization amount. - name: name scope of calibrator wrinkle regularizer. - - Returns: - A rank-0 tensor (scalar) that contains regularizer or None if there is no - regularization. This can happen if l1_reg and l2_reg amounts are not set, or - num_keypoints <= 3. - - Raises: - ValueError: * If output_keypoints is not rank-1 tensor. - * If the shape of output_keypoints is unknown. - """ - dims = output_keypoints.shape.as_list() - if len(dims) != 1: - raise ValueError('calibrator_wrinkle expects output_keypoints as a ' - 'rank-1 tensor but got shape: %s' % dims) - num_kpts = dims[0] - if num_kpts is None: - raise ValueError('calibrator_wrinkle expects output_keypoints dimension ' - 'to be known, but the first dimension is not set.') - - if num_kpts < 4 or (l1_reg is None and l2_reg is None): - return None - - reg = None - with tf.name_scope(name): - third_drv = (3 * tf.slice(output_keypoints, [1], [num_kpts - 3]) - - 3 * tf.slice(output_keypoints, [2], [num_kpts - 3]) - tf.slice( - output_keypoints, [0], [num_kpts - 3]) + tf.slice( - output_keypoints, [3], [num_kpts - 3])) - if l1_reg: - reg = tools.add_if_not_none(reg, - l1_reg * tf.reduce_sum(tf.abs(third_drv))) - if l2_reg: - reg = tools.add_if_not_none(reg, - l2_reg * tf.reduce_sum(tf.square(third_drv))) - - return reg - - -# List of supported calibrator regularizers. -CALIBRATOR_REGULARIZERS = [ - 'l1_reg', - 'l2_reg', - 'l1_laplacian_reg', - 'l2_laplacian_reg', - 'l1_hessian_reg', - 'l2_hessian_reg', - 'l1_wrinkle_reg', - 'l2_wrinkle_reg', -] - - -def calibrator_regularization(output_keypoints, - l1_reg=None, - l2_reg=None, - l1_laplacian_reg=None, - l2_laplacian_reg=None, - l1_hessian_reg=None, - l2_hessian_reg=None, - l1_wrinkle_reg=None, - l2_wrinkle_reg=None, - name='calibrator_regularization'): - """Returns a calibrator regularization op. - - Args: - output_keypoints: (Rank-1 tensor with shape [num_keypoints]) 1d calibrator's - output keypoints tensor. - l1_reg: (float) l1 regularization amount. - l2_reg: (float) l2 regularization amount. - l1_laplacian_reg: (float) l1 Laplacian regularization amount. - l2_laplacian_reg: (float) l2 Laplacian regularization amount. - l1_hessian_reg: (float) l1 Hessian regularization amount. - l2_hessian_reg: (float) l2 Hessian regularization amount. - l1_wrinkle_reg: (float) l1 Wrinkle regularization amount. - l2_wrinkle_reg: (float) l2 Wrinkle regularization amount. - name: name scope of calibrator regularization. - - Returns: - Rank-0 tensor (scalar) that contains calibrator regularization. - - Raises: - ValueError: * If output_keypoints is not rank-1 tensor. - * If the shape of output_keypoints is unknown. 
- """ - with tf.name_scope(name): - reg = _calibrator_laplacian( - output_keypoints, l1_reg=l1_laplacian_reg, l2_reg=l2_laplacian_reg) - reg = tools.add_if_not_none( - reg, - _calibrator_hessian( - output_keypoints, l1_reg=l1_hessian_reg, l2_reg=l2_hessian_reg)) - reg = tools.add_if_not_none( - reg, - _calibrator_wrinkle( - output_keypoints, l1_reg=l1_wrinkle_reg, l2_reg=l2_wrinkle_reg)) - if l1_reg: - reg = tools.add_if_not_none( - reg, l1_reg * tf.reduce_sum(tf.abs(output_keypoints))) - if l2_reg: - reg = tools.add_if_not_none( - reg, l2_reg * tf.reduce_sum(tf.square(output_keypoints))) - - return reg - - -def _lattice_laplacian(lattice_param, - lattice_sizes, - l1_reg=None, - l2_reg=None, - name='lattice_laplacian'): - """Returns a lattice laplacian regularization. - - Laplacian regularizers penalize the difference between adjacent vertices in - multi-cell lattice. See Lattice Regression, NIPS, 2009 for the details, but - we provide a 2d example in here. - - Consider a 3 x 2 lattice: - 3-------4--------5 - | | | - | | | - 0-------1--------2 - where the number at each node represents the parameter index. - In this case, the laplacian l1 regularizer is defined as - - reg = l1_reg[0] * (|param[1] - param[0]| + |param[2] - param[1]| - + |param[4] - param[3]| + |param[5] - param[4]|) - + l1_reg[1] * (|param[3] - param[0]| + |param[4] - param[1]| - + |param[5] - param[2]}) - where param is a lattice_param tensor assuming one output. - In l2 case, the absolute value is replaced with a square. - - If num_outputs > 1, the op is - total_reg = sum_{d=1}^{output_dim} reg(lattice_param[d, :]) - i.e., a sum across all output dimensions. - - Args: - lattice_param: (Rank-2 tensor with shape [num_outputs, num_parameters]) - lattice model's parameter. - lattice_sizes: (list of integers) lattice size of each dimension. - l1_reg: (list of floats or float) l1 regularization amount per each - lattice dimension. If float, a same number will be accrossed to all - lattice dimensions. - l2_reg: (list of floats or float) l2 regularization amount per each - lattice dimension. If float, a same number will be accrossed to all - lattice dimensions. - name: name scope of lattice laplacian regularizer. - - Returns: - A rank-0 tensor (scalar) that contains regularizer or None if there is no - regularization. This can happen if l1_reg and l2_reg amounts are not set. - - Raises: - ValueError: * lattice_param is not rank-2 tensor. - * output_dim or param_dim is unknown. - """ - dims = lattice_param.shape.as_list() - if len(dims) != 2: - raise ValueError( - 'lattice_laplacian expects lattice_param as a ' - 'rank-2 tensor but got dimensions: ', dims) - output_dim = dims[0] - param_dim = dims[1] - if output_dim is None or param_dim is None: - raise ValueError( - 'lattice_laplacian expects all the dimensions in ' - 'lattice_param to be known, but got dimensions: ', dims) - - l1_reg = tools.cast_to_list(l1_reg, len(lattice_sizes), 'laplacian_l1_reg') - l2_reg = tools.cast_to_list(l2_reg, len(lattice_sizes), 'laplacian_l2_reg') - - # Collect all dimensions that has non-trivial regularization amount. 
-  reg_dims = []
-  lattice_rank = len(lattice_sizes)
-  for dim in range(lattice_rank):
-    if l1_reg[dim] or l2_reg[dim]:
-      reg_dims.append(dim)
-
-  if not reg_dims:
-    return None
-
-  regularization = None
-
-  with tf.name_scope(name):
-    for dim in reg_dims:
-      slice_size = lattice_sizes[dim] - 1
-      per_dim_upper = tools.lattice_1d_slice(
-          lattice_param,
-          lattice_sizes=lattice_sizes,
-          lattice_axis=dim,
-          begin=1,
-          size=slice_size)
-      per_dim_lower = tools.lattice_1d_slice(
-          lattice_param,
-          lattice_sizes=lattice_sizes,
-          lattice_axis=dim,
-          begin=0,
-          size=slice_size)
-      per_dim_diff = per_dim_upper - per_dim_lower
-      if l1_reg[dim]:
-        regularization = tools.add_if_not_none(
-            regularization, l1_reg[dim] * tf.reduce_sum(tf.abs(per_dim_diff)))
-      if l2_reg[dim]:
-        regularization = tools.add_if_not_none(
-            regularization,
-            l2_reg[dim] * tf.reduce_sum(tf.square(per_dim_diff)))
-
-  return regularization
-
-
-def _lattice_torsion(lattice_param,
-                     lattice_sizes,
-                     l1_reg=None,
-                     l2_reg=None,
-                     name='lattice_torsion'):
-  """Returns a lattice torsion regularization.
-
-  Torsion regularizers penalize how much the lattice function twists from
-  side to side, i.e., the non-linear interaction in each 2 x 2 cell. See
-  Monotonic Calibrated Interpolated Look-Up Tables, JMLR, 2016 for the
-  details, but we provide a 2d example in here.
-
-  Consider a 3 x 2 lattice:
-    3-------4--------5
-    |       |        |
-    |       |        |
-    0-------1--------2
-  where the number at each node represents the parameter index.
-  In this case, the torsion l2 regularizer is defined as
-
-  reg = l2_reg * ((param[4] + param[0] - param[3] - param[1]) ** 2
-                  + (param[5] + param[1] - param[4] - param[2]) ** 2)
-
-  where param is a lattice_param tensor assuming one output.
-  In the l1 case, the squared value is replaced with the absolute value.
-
-  If num_outputs > 1, the op is
-    total_reg = sum_{d=1}^{output_dim} reg(lattice_param[d, :])
-  i.e., a sum across all output dimensions.
-
-  Args:
-    lattice_param: (Rank-2 tensor with shape [num_outputs, num_parameters])
-      lattice model's parameter.
-    lattice_sizes: (list of integers) lattice size of each dimension.
-    l1_reg: (float) l1 regularization amount.
-    l2_reg: (float) l2 regularization amount.
-    name: name scope of lattice torsion regularizer.
-
-  Returns:
-    A rank-0 tensor (scalar) that contains the regularizer, or None if there
-    is no regularization. This can happen if l1_reg and l2_reg amounts are
-    not set.
-
-  Raises:
-    ValueError: * lattice_param is not a rank-2 tensor.
-                * output_dim or param_dim is unknown.
- """ - dims = lattice_param.shape.as_list() - if len(dims) != 2: - raise ValueError( - 'lattice_laplacian expects lattice_param as a ' - 'rank-2 tensor but got dimensions: ', dims) - output_dim = dims[0] - param_dim = dims[1] - lattice_rank = len(lattice_sizes) - if output_dim is None or param_dim is None: - raise ValueError( - 'lattice_laplacian expects all the dimensions in ' - 'lattice_param to be known, but got dimensions: ', dims) - - if l1_reg is None and l2_reg is None: - return None - - regularization = None - - with tf.name_scope(name): - for dim1 in range(lattice_rank - 1): - slice_size1 = lattice_sizes[dim1] - 1 - param_0x = tools.lattice_1d_slice( - lattice_param, - lattice_sizes=lattice_sizes, - lattice_axis=dim1, - begin=0, - size=slice_size1) - param_1x = tools.lattice_1d_slice( - lattice_param, - lattice_sizes=lattice_sizes, - lattice_axis=dim1, - begin=1, - size=slice_size1) - resized_lattice_sizes = copy.deepcopy(lattice_sizes) - resized_lattice_sizes[dim1] -= 1 - for dim2 in range(dim1 + 1, lattice_rank): - slice_size2 = resized_lattice_sizes[dim2] - 1 - param_00 = tools.lattice_1d_slice( - param_0x, - lattice_sizes=resized_lattice_sizes, - lattice_axis=dim2, - begin=0, - size=slice_size2) - param_01 = tools.lattice_1d_slice( - param_0x, - lattice_sizes=resized_lattice_sizes, - lattice_axis=dim2, - begin=1, - size=slice_size2) - param_10 = tools.lattice_1d_slice( - param_1x, - lattice_sizes=resized_lattice_sizes, - lattice_axis=dim2, - begin=0, - size=slice_size2) - param_11 = tools.lattice_1d_slice( - param_1x, - lattice_sizes=resized_lattice_sizes, - lattice_axis=dim2, - begin=1, - size=slice_size2) - torsion = param_00 + param_11 - param_01 - param_10 - if l1_reg: - regularization = tools.add_if_not_none( - regularization, l1_reg * tf.reduce_sum(tf.abs(torsion))) - if l2_reg: - regularization = tools.add_if_not_none( - regularization, l2_reg * tf.reduce_sum(tf.square(torsion))) - - return regularization - - -# List of supported one-dimensional lattice regularizers. -LATTICE_ONE_DIMENSIONAL_REGULARIZERS = [ - 'l1_laplacian_reg', - 'l2_laplacian_reg', -] - -# List of supported multi-dimensional lattice regularizers. -LATTICE_MULTI_DIMENSIONAL_REGULARIZERS = [ - 'l1_reg', - 'l2_reg', - 'l1_torsion_reg', - 'l2_torsion_reg', -] - -# List of supported lattice regularizers. -LATTICE_REGULARIZERS = ( - LATTICE_ONE_DIMENSIONAL_REGULARIZERS + - LATTICE_MULTI_DIMENSIONAL_REGULARIZERS) - - -def lattice_regularization(lattice_params, - lattice_sizes, - l1_reg=None, - l2_reg=None, - l1_torsion_reg=None, - l2_torsion_reg=None, - l1_laplacian_reg=None, - l2_laplacian_reg=None, - name='lattice_regularization'): - """Returns a lattice regularization op. - - Args: - lattice_params: (Rank-2 tensor with shape [output_dim, param_dim]) Lattice - parameter tensor. - lattice_sizes: (list of integers) lattice size of each dimension. - l1_reg: (float) l1 regularization amount. - l2_reg: (float) l2 regularization amount. - l1_torsion_reg: (float) l1 torsion regularization amount. - l2_torsion_reg: (float) l2 torsion regularization amount. - l1_laplacian_reg: (list of floats or float) list of L1 Laplacian - regularization amount per each dimension. If a single float value is - provided, then all diemnsion will get the same value. - l2_laplacian_reg: (list of floats or float) list of L2 Laplacian - regularization amount per each dimension. If a single float value is - provided, then all diemnsion will get the same value. - name: name scope of lattice regularization. 
-
-  Returns:
-    Rank-0 tensor (scalar) that contains lattice regularization.
-
-  Raises:
-    ValueError: * lattice_param is not a rank-2 tensor.
-                * output_dim or param_dim is unknown.
-  """
-  with tf.name_scope(name):
-    reg = _lattice_laplacian(
-        lattice_params,
-        lattice_sizes,
-        l1_reg=l1_laplacian_reg,
-        l2_reg=l2_laplacian_reg)
-    reg = tools.add_if_not_none(
-        reg,
-        _lattice_torsion(
-            lattice_params,
-            lattice_sizes,
-            l1_reg=l1_torsion_reg,
-            l2_reg=l2_torsion_reg))
-    if l1_reg:
-      reg = tools.add_if_not_none(
-          reg, l1_reg * tf.reduce_sum(tf.abs(lattice_params)))
-    if l2_reg:
-      reg = tools.add_if_not_none(
-          reg, l2_reg * tf.reduce_sum(tf.square(lattice_params)))
-
-  return reg
-
-
-# List of supported linear regularizers.
-LINEAR_REGULARIZERS = [
-    'l1_reg',
-    'l2_reg',
-]
-
-
-def linear_regularization(linear_params,
-                          l1_reg=None,
-                          l2_reg=None,
-                          name='linear_regularization'):
-  """Returns a linear regularization op.
-
-  Args:
-    linear_params: Linear model's parameter tensor.
-    l1_reg: (float) l1 regularization amount.
-    l2_reg: (float) l2 regularization amount.
-    name: name scope of linear regularization.
-
-  Returns:
-    Rank-0 tensor (scalar) that contains linear regularization, or None if
-    neither l1_reg nor l2_reg is set.
-  """
-  with tf.name_scope(name):
-    reg = 0
-    if l1_reg:
-      reg += l1_reg * tf.reduce_sum(tf.abs(linear_params))
-    if l2_reg:
-      reg += l2_reg * tf.reduce_sum(tf.square(linear_params))
-
-  return reg if l1_reg or l2_reg else None
diff --git a/tensorflow_lattice/python/lib/regularizers_test.py b/tensorflow_lattice/python/lib/regularizers_test.py
deleted file mode 100644
index 50d744d..0000000
--- a/tensorflow_lattice/python/lib/regularizers_test.py
+++ /dev/null
@@ -1,1021 +0,0 @@
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Tests for TensorFlow Lattice's regularizers module."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import tensorflow as tf
-
-from tensorflow_lattice.python.lib import regularizers
-
-
-class CalibratorLaplacianTestCase(tf.test.TestCase):
-
-  def setUp(self):
-    self._num_examples = 4
-    self._keypoint_lists = [
-        [0.0, 0.1, 1.0],  # for better formatting
-        [-1.0, 0.2, 0.3, 0.5],
-        [1.11, 2.11, -1.5, -10.232],
-        [2.22, -51.1, 321.0, 33.22, -201.0, -50.0]
-    ]
-    # L1 regularization amount assuming 1.0 weight.
-    self._l1_regs = [1.0, 1.4999999999999998, 13.34199999999999, 1098.42]
-    # L2 regularization amount assuming 1.0 weight.
-    self._l2_regs = [0.8200000000000001, 1.49, 90.28, 301778.78]
-
-    super(CalibratorLaplacianTestCase, self).setUp()
-
-  def _runAndCheckValues(self,
-                         output_keypoints,
-                         expected_value,
-                         l1_reg=None,
-                         l2_reg=None):
-    output_keypoints_tensor = tf.constant(output_keypoints, dtype=tf.float32)
-    reg = regularizers.calibrator_regularization(
-        output_keypoints_tensor,
-        l1_laplacian_reg=l1_reg,
-        l2_laplacian_reg=l2_reg)
-    with self.session() as sess:
-      reg_value = sess.run(reg)
-      self.assertAlmostEqual(reg_value, expected_value, delta=1e-1)
-
-  def testL1Regularizer(self):
-    """Check l1 regularization amount."""
-    l1_reg = 1.0
-    for cnt in range(self._num_examples):
-      expected_value = l1_reg * self._l1_regs[cnt]
-      self._runAndCheckValues(
-          self._keypoint_lists[cnt], expected_value, l1_reg=l1_reg)
-
-  def testL2Regularizer(self):
-    """Check l2 regularization amount."""
-    l2_reg = 1.0
-    for cnt in range(self._num_examples):
-      expected_value = l2_reg * self._l2_regs[cnt]
-      self._runAndCheckValues(
-          self._keypoint_lists[cnt], expected_value, l2_reg=l2_reg)
-
-  def testL1AndL2Regularizers(self):
-    """Check l1 and l2 regularization amount."""
-    l1_reg = 0.5
-    l2_reg = 0.5
-    for cnt in range(self._num_examples):
-      expected_value = l1_reg * self._l1_regs[cnt] + l2_reg * self._l2_regs[cnt]
-      self._runAndCheckValues(
-          self._keypoint_lists[cnt],
-          expected_value,
-          l1_reg=l1_reg,
-          l2_reg=l2_reg)
-
-  def testRank2TensorExpectsError(self):
-    """Pass rank-2 tensor output keypoints and check the error."""
-    output_keypoints_tensor = tf.compat.v1.placeholder(
-        dtype=tf.float32, shape=[10, 10])
-    with self.assertRaises(ValueError):
-      regularizers.calibrator_regularization(output_keypoints_tensor)
-
-  def testUnknownShapeTensorExpectsError(self):
-    """Pass rank-1 tensor with unknown shape and check the error."""
-    output_keypoints_tensor = tf.compat.v1.placeholder(
-        dtype=tf.float32, shape=[None])
-    with self.assertRaises(ValueError):
-      regularizers.calibrator_regularization(output_keypoints_tensor)
-
-  def testOneKeypointsExpectsNone(self):
-    """Pass a tensor with one keypoint and check the None regularizer."""
-    output_keypoints_tensor = tf.compat.v1.placeholder(
-        dtype=tf.float32, shape=[1])
-    self.assertEqual(
-        regularizers.calibrator_regularization(output_keypoints_tensor), None)
-
-  def testNoRegularizerExpectsNone(self):
-    """Set no l1_reg and l2_reg and check None regularizer."""
-    output_keypoints_tensor = tf.compat.v1.placeholder(
-        dtype=tf.float32, shape=[2])
-    self.assertEqual(
-        regularizers.calibrator_regularization(output_keypoints_tensor), None)
-
-
-class CalibratorHessianTestCase(tf.test.TestCase):
-
-  def setUp(self):
-    self._num_examples = 4
-    self._keypoint_lists = [
-        [0.0, 0.1, 1.0],  # for better formatting
-        [-1.0, 0.2, 0.3, 0.5],
-        [1.11, 2.11, -1.5, -10.232],
-        [2.22, -51.1, 321.0, 33.22, -201.0, -50.0]
-    ]
-    # L1 regularization amount assuming 1.0 weight.
-    self._l1_regs = [0.8, 1.2, 9.732, 1524.08]
-    # L2 regularization amount assuming 1.0 weight.
-    self._l2_regs = [0.64, 1.22, 47.486984, 767686.9128]
-
-    super(CalibratorHessianTestCase, self).setUp()
-
-  def _runAndCheckValues(self,
-                         output_keypoints,
-                         expected_value,
-                         l1_reg=None,
-                         l2_reg=None):
-    output_keypoints_tensor = tf.constant(output_keypoints, dtype=tf.float32)
-    reg = regularizers.calibrator_regularization(
-        output_keypoints_tensor, l1_hessian_reg=l1_reg, l2_hessian_reg=l2_reg)
-    with self.session() as sess:
-      reg_value = sess.run(reg)
-      self.assertAlmostEqual(reg_value, expected_value, delta=1e-1)
-
-  def testL1Regularizer(self):
-    """Check l1 regularization amount."""
-    l1_reg = 1.0
-    for cnt in range(self._num_examples):
-      expected_value = l1_reg * self._l1_regs[cnt]
-      self._runAndCheckValues(
-          self._keypoint_lists[cnt], expected_value, l1_reg=l1_reg)
-
-  def testL2Regularizer(self):
-    """Check l2 regularization amount."""
-    l2_reg = 1.0
-    for cnt in range(self._num_examples):
-      expected_value = l2_reg * self._l2_regs[cnt]
-      self._runAndCheckValues(
-          self._keypoint_lists[cnt], expected_value, l2_reg=l2_reg)
-
-  def testL1AndL2Regularizers(self):
-    """Check l1 and l2 regularization amount."""
-    l1_reg = 0.5
-    l2_reg = 0.5
-    for cnt in range(self._num_examples):
-      expected_value = l1_reg * self._l1_regs[cnt] + l2_reg * self._l2_regs[cnt]
-      self._runAndCheckValues(
-          self._keypoint_lists[cnt],
-          expected_value,
-          l1_reg=l1_reg,
-          l2_reg=l2_reg)
-
-  def testRank2TensorExpectsError(self):
-    """Pass rank-2 tensor output keypoints and check the error."""
-    output_keypoints_tensor = tf.compat.v1.placeholder(
-        dtype=tf.float32, shape=[10, 10])
-    with self.assertRaises(ValueError):
-      regularizers.calibrator_regularization(output_keypoints_tensor)
-
-  def testUnknownShapeTensorExpectsError(self):
-    """Pass rank-1 tensor with unknown shape and check the error."""
-    output_keypoints_tensor = tf.compat.v1.placeholder(
-        dtype=tf.float32, shape=[None])
-    with self.assertRaises(ValueError):
-      regularizers.calibrator_regularization(output_keypoints_tensor)
-
-  def testTwoKeypointsExpectsNone(self):
-    """Pass a tensor with two keypoints and check the None regularizer."""
-    output_keypoints_tensor = tf.compat.v1.placeholder(
-        dtype=tf.float32, shape=[2])
-    self.assertEqual(
-        regularizers.calibrator_regularization(output_keypoints_tensor), None)
-
-  def testNoRegularizerExpectsNone(self):
-    """Set no l1_reg and l2_reg and check None regularizer."""
-    output_keypoints_tensor = tf.compat.v1.placeholder(
-        dtype=tf.float32, shape=[2])
-    self.assertEqual(
-        regularizers.calibrator_regularization(output_keypoints_tensor), None)
-
-
-class CalibratorWrinkleTestCase(tf.test.TestCase):
-
-  def setUp(self):
-    self._num_examples = 4
-    self._keypoint_lists = [
-        [0.1, 0.1, 0.1, 0.1],  # constant
-        [1.0, 2.0, 3.0, 4.0],  # linear
-        [0.0, 1.0, 4.0, 9.0],  # 2nd degree polynomial
-        [0.0, 1.0, 4.0, 11.0]
-    ]
-    # L1 regularization amount assuming 1.0 weight.
-    self._l1_regs = [0.0, 0.0, 0.0, 2.0]
-    # L2 regularization amount assuming 1.0 weight.
-    self._l2_regs = [0.0, 0.0, 0.0, 4.0]
-
-    super(CalibratorWrinkleTestCase, self).setUp()
-
-  def _runAndCheckValues(self,
-                         output_keypoints,
-                         expected_value,
-                         l1_reg=None,
-                         l2_reg=None):
-    output_keypoints_tensor = tf.constant(
-        output_keypoints, dtype=tf.float32)
-    reg = regularizers.calibrator_regularization(
-        output_keypoints_tensor, l1_wrinkle_reg=l1_reg, l2_wrinkle_reg=l2_reg)
-    with self.session() as sess:
-      reg_value = sess.run(reg)
-    self.assertAlmostEqual(reg_value, expected_value, delta=1e-1)
-
-  def testL1Regularizer(self):
-    """Check l1 regularization amount."""
-    l1_reg = 1.0
-    for cnt in range(self._num_examples):
-      expected_value = l1_reg * self._l1_regs[cnt]
-      self._runAndCheckValues(
-          self._keypoint_lists[cnt], expected_value, l1_reg=l1_reg)
-
-  def testL2Regularizer(self):
-    """Check l2 regularization amount."""
-    l2_reg = 1.0
-    for cnt in range(self._num_examples):
-      expected_value = l2_reg * self._l2_regs[cnt]
-      self._runAndCheckValues(
-          self._keypoint_lists[cnt], expected_value, l2_reg=l2_reg)
-
-  def testL1AndL2Regularizers(self):
-    """Check l1 and l2 regularization amount."""
-    l1_reg = 0.5
-    l2_reg = 0.5
-    for cnt in range(self._num_examples):
-      expected_value = l1_reg * self._l1_regs[cnt] + l2_reg * self._l2_regs[cnt]
-      self._runAndCheckValues(
-          self._keypoint_lists[cnt],
-          expected_value,
-          l1_reg=l1_reg,
-          l2_reg=l2_reg)
-
-  def testRank2TensorExpectsError(self):
-    """Pass rank-2 tensor output keypoints and check the error."""
-    output_keypoints_tensor = tf.compat.v1.placeholder(
-        dtype=tf.float32, shape=[10, 10])
-    with self.assertRaises(ValueError):
-      regularizers.calibrator_regularization(output_keypoints_tensor)
-
-  def testUnknownShapeTensorExpectsError(self):
-    """Pass rank-1 tensor with unknown shape and check the error."""
-    output_keypoints_tensor = tf.compat.v1.placeholder(
-        dtype=tf.float32, shape=[None])
-    with self.assertRaises(ValueError):
-      regularizers.calibrator_regularization(output_keypoints_tensor)
-
-  def testTwoKeypointsExpectsNone(self):
-    """Pass a tensor with two keypoints and check None regularizer."""
-    output_keypoints_tensor = tf.compat.v1.placeholder(
-        dtype=tf.float32, shape=[2])
-    self.assertEqual(
-        regularizers.calibrator_regularization(output_keypoints_tensor), None)
-
-  def testNoRegularizerExpectsNone(self):
-    """Set no l1_reg and l2_reg and check None regularizer."""
-    output_keypoints_tensor = tf.compat.v1.placeholder(
-        dtype=tf.float32, shape=[2])
-    self.assertEqual(
-        regularizers.calibrator_regularization(output_keypoints_tensor), None)
-
-
-class CalibratorRegularizersTestCase(tf.test.TestCase):
-
-  def setUp(self):
-    self._num_examples = 4
-    self._keypoint_lists = [
-        [0.0, 0.1, 1.0],  # for better formatting
-        [-1.0, 0.2, 0.3, 0.5],
-        [1.11, 2.11, -1.5, -10.232],
-        [2.22, -51.1, 321.0, 33.22, -201.0, -50.0]
-    ]
-    # L1 regularization amount assuming 1.0 weight.
-    self._l1_regs = [1.1, 2.0, 14.952, 658.54]
-    # L2 regularization amount assuming 1.0 weight.
-    self._l2_regs = [1.01, 1.38, 112.628024, 149661.7068]
-    # L1 laplacian regularization amount assuming 1.0 weight.
-    self._l1_laplacian_regs = [
-        1.0, 1.4999999999999998, 13.34199999999999, 1098.42
-    ]
-    # L2 laplacian regularization amount assuming 1.0 weight.
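-    # (The Laplacian regularizer penalizes first-order differences of the
-    # keypoints, d1[i] = kp[i+1] - kp[i]. E.g. [0.0, 0.1, 1.0] has
-    # d1 = [0.1, 0.9], giving L1 = 1.0 and L2 = 0.82.)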
- self._l2_laplacian_regs = [0.8200000000000001, 1.49, 90.28, 301778.78] - - super(CalibratorRegularizersTestCase, self).setUp() - - def _runAndCheckValues(self, - output_keypoints, - expected_value, - l1_reg=None, - l2_reg=None, - l1_laplacian_reg=None, - l2_laplacian_reg=None): - output_keypoints_tensor = tf.constant( - output_keypoints, dtype=tf.float32) - reg = regularizers.calibrator_regularization( - output_keypoints_tensor, - l1_reg=l1_reg, - l2_reg=l2_reg, - l1_laplacian_reg=l1_laplacian_reg, - l2_laplacian_reg=l2_laplacian_reg) - with self.session() as sess: - reg_value = sess.run(reg) - self.assertAlmostEqual(reg_value, expected_value, delta=1e-1) - - def testL1Regularizer(self): - """Check l1 regularization amount.""" - l1_reg = 1.0 - for cnt in range(self._num_examples): - expected_value = l1_reg * self._l1_regs[cnt] - self._runAndCheckValues( - self._keypoint_lists[cnt], expected_value, l1_reg=l1_reg) - - def testL2Regularizer(self): - """Check l2 regularization amount.""" - l2_reg = 2.0 - for cnt in range(self._num_examples): - expected_value = l2_reg * self._l2_regs[cnt] - self._runAndCheckValues( - self._keypoint_lists[cnt], expected_value, l2_reg=l2_reg) - - def testL1AndL2Regularizers(self): - """Check l1 and l2 regularization amount.""" - l1_reg = 0.5 - l2_reg = 0.5 - for cnt in range(self._num_examples): - expected_value = l1_reg * self._l1_regs[cnt] + l2_reg * self._l2_regs[cnt] - self._runAndCheckValues( - self._keypoint_lists[cnt], - expected_value, - l1_reg=l1_reg, - l2_reg=l2_reg) - - def testAllRegularizers(self): - """Check l1, l2 and laplacian regularization amount.""" - l1_reg = 0.5 - l2_reg = 0.5 - l1_laplacian_reg = 0.5 - l2_laplacian_reg = 0.5 - for cnt in range(self._num_examples): - expected_value = l1_reg * self._l1_regs[cnt] - expected_value += l2_reg * self._l2_regs[cnt] - expected_value += l1_laplacian_reg * self._l1_laplacian_regs[cnt] - expected_value += l2_laplacian_reg * self._l2_laplacian_regs[cnt] - self._runAndCheckValues( - self._keypoint_lists[cnt], - expected_value, - l1_reg=l1_reg, - l2_reg=l2_reg, - l1_laplacian_reg=l1_laplacian_reg, - l2_laplacian_reg=l2_laplacian_reg) - - -class LatticeLaplacianTestCase(tf.test.TestCase): - - def _runAndCheckValues(self, - lattice_param, - lattice_sizes, - expected_value, - l1_reg=None, - l2_reg=None): - lattice_param_tensor = tf.constant( - lattice_param, dtype=tf.float32) - reg = regularizers.lattice_regularization( - lattice_param_tensor, - lattice_sizes, - l1_laplacian_reg=l1_reg, - l2_laplacian_reg=l2_reg) - with self.session() as sess: - reg_value = sess.run(reg) - self.assertAlmostEqual(reg_value, expected_value, delta=1e-1) - - def testZeroRegularizerValueForVariousLatticeRanks(self): - """Check zero output value for zero parameters.""" - for lattice_rank in range(2, 10): - param_dim = 2**lattice_rank - lattice_param = [[0.0] * param_dim] - self._runAndCheckValues( - lattice_param=lattice_param, - lattice_sizes=[2] * lattice_rank, - expected_value=0.0, - l1_reg=1.0, - l2_reg=1.0) - - def testL1RegularizerWithTwoByTwo(self): - """Check l1 regularization amount for two by two lattices. 
-
-    In a 2 x 2 lattice, the L1 Laplacian regularizer has the following form:
-
-    l1_first_reg = abs(param[1] - param[0]) + abs(param[3] - param[2])
-
-    l1_second_reg = abs(param[2] - param[0]) + abs(param[3] - param[1])
-
-    l1_reg = l1_reg[0] * l1_first_reg + l1_reg[1] * l1_second_reg,
-
-    where param is the lattice parameter tensor (assuming one output),
-    l1_first_reg is the regularization amount along the first dimension, and
-    l1_second_reg is the regularization amount along the second dimension.
-    """
-    self._runAndCheckValues(
-        lattice_param=[[0.0, 0.0, 1.0, 1.0]],
-        lattice_sizes=[2, 2],
-        expected_value=2.0,
-        l1_reg=1.0)
-    self._runAndCheckValues(
-        lattice_param=[[0.0, 0.0, 1.0, 1.0]],
-        lattice_sizes=[2, 2],
-        expected_value=2.0,
-        l1_reg=[0.0, 1.0])
-    self._runAndCheckValues(
-        lattice_param=[[0.0, 0.0, 1.0, 1.0]],
-        lattice_sizes=[2, 2],
-        expected_value=0.0,
-        l1_reg=[1.0, 0.0])
-    self._runAndCheckValues(
-        lattice_param=[[0.3312, -0.3217, -0.5, 0.1]],
-        lattice_sizes=[2, 2],
-        expected_value=2.5058,
-        l1_reg=1.0)
-    self._runAndCheckValues(
-        lattice_param=[[0.3312, -0.3217, -0.5, 0.1]],
-        lattice_sizes=[2, 2],
-        expected_value=1.87935,
-        l1_reg=[0.5, 1.0])
-    self._runAndCheckValues(
-        lattice_param=[[0.0, 0.0, 1.0, 1.0], [0.3312, -0.3217, -0.5, 0.1]],
-        lattice_sizes=[2, 2],
-        expected_value=4.5058,
-        l1_reg=1.0)
-
-  def testL1RegularizerWithTwoByThreeByTwo(self):
-    """Check l1 regularization amount for two by three by two lattices.
-
-    In a 2 x 3 x 2 lattice, the L1 Laplacian regularizer has the following
-    form:
-
-    l1_first_reg = (abs(param[1] - param[0]) + abs(param[3] - param[2])
-                    + abs(param[5] - param[4]) + abs(param[7] - param[6])
-                    + abs(param[9] - param[8]) + abs(param[11] - param[10]))
-    l1_second_reg = (abs(param[2] - param[0]) + abs(param[4] - param[2])
-                     + abs(param[3] - param[1]) + abs(param[5] - param[3])
-                     + abs(param[8] - param[6]) + abs(param[10] - param[8])
-                     + abs(param[9] - param[7]) + abs(param[11] - param[9]))
-    l1_third_reg = (abs(param[6] - param[0]) + abs(param[7] - param[1])
-                    + abs(param[8] - param[2]) + abs(param[9] - param[3])
-                    + abs(param[10] - param[4]) + abs(param[11] - param[5]))
-
-    l1_reg = (l1_reg[0] * l1_first_reg + l1_reg[1] * l1_second_reg
-              + l1_reg[2] * l1_third_reg),
-
-    where param is the lattice parameter tensor (assuming one output),
-    l1_first_reg is the regularization amount along the first dimension,
-    l1_second_reg is the regularization amount along the second dimension, and
-    l1_third_reg is the regularization amount along the third dimension.
-    """
-    self._runAndCheckValues(
-        lattice_param=[[
-            1.11, 2.22, -3.22, 0.33, -0.221, 3.123, -0.477, 1.22, 3.221, 11.22,
-            22.12, 33.11
-        ]],
-        lattice_sizes=[2, 3, 2],
-        expected_value=28.69,
-        l1_reg=[1.0, 0.0, 0.0])
-    self._runAndCheckValues(
-        lattice_param=[[
-            1.11, 2.22, -3.22, 0.33, -0.221, 3.123, -0.477, 1.22, 3.221, 11.22,
-            22.12, 33.11
-        ]],
-        lattice_sizes=[2, 3, 2],
-        expected_value=66.499,
-        l1_reg=[0.0, 1.0, 0.0])
-    self._runAndCheckValues(
-        lattice_param=[[
-            1.11, 2.22, -3.22, 0.33, -0.221, 3.123, -0.477, 1.22, 3.221, 11.22,
-            22.12, 33.11
-        ]],
-        lattice_sizes=[2, 3, 2],
-        expected_value=72.246,
-        l1_reg=[0.0, 0.0, 1.0])
-    self._runAndCheckValues(
-        lattice_param=[[
-            1.11, 2.22, -3.22, 0.33, -0.221, 3.123, -0.477, 1.22, 3.221, 11.22,
-            22.12, 33.11
-        ]],
-        lattice_sizes=[2, 3, 2],
-        expected_value=167.435,
-        l1_reg=1.0)
-    self._runAndCheckValues(
-        lattice_param=[[
-            1.11, 2.22, -3.22, 0.33, -0.221, 3.123, -0.477, 1.22, 3.221, 11.22,
-            22.12, 33.11
-        ], [
-            -2.003, 1.221, 0.321, 0.447, 0.321, 0.446, -0.33192, 0.476, 0.8976,
-            -4.123, 0.487, -0.4473
-        ]],
-        lattice_sizes=[2, 3, 2],
-        expected_value=199.30862,
-        l1_reg=1.0)
-
-  def testL2RegularizerWithTwoByTwo(self):
-    """Check l2 regularization amount.
-
-    Replacing abs with square in the formula in testL1RegularizerWithTwoByTwo
-    gives the L2 Laplacian.
-    """
-    self._runAndCheckValues(
-        lattice_param=[[0.0, 0.0, 1.0, 1.0]],
-        lattice_sizes=[2, 2],
-        expected_value=2.0,
-        l2_reg=1.0)
-    self._runAndCheckValues(
-        lattice_param=[[0.0, 0.0, 1.0, 1.0]],
-        lattice_sizes=[2, 2],
-        expected_value=2.0,
-        l2_reg=[0.0, 1.0])
-    self._runAndCheckValues(
-        lattice_param=[[0.0, 0.0, 1.0, 1.0]],
-        lattice_sizes=[2, 2],
-        expected_value=0.0,
-        l2_reg=[1.0, 0.0])
-    self._runAndCheckValues(
-        lattice_param=[[0.3312, -0.3217, -0.5, 0.1]],
-        lattice_sizes=[2, 2],
-        expected_value=1.65500274,
-        l2_reg=1.0)
-    self._runAndCheckValues(
-        lattice_param=[[0.3312, -0.3217, -0.5, 0.1]],
-        lattice_sizes=[2, 2],
-        expected_value=1.261863535,
-        l2_reg=[0.5, 1.0])
-    self._runAndCheckValues(
-        lattice_param=[[0.0, 0.0, 1.0, 1.0], [0.3312, -0.3217, -0.5, 0.1]],
-        lattice_sizes=[2, 2],
-        expected_value=3.65500274,
-        l2_reg=1.0)
-
-  def testL2RegularizerWithTwoByThreeByTwo(self):
-    """Check l2 regularization amount for two by three by two lattices.
-
-    Replacing abs with square in the formula in
-    testL1RegularizerWithTwoByThreeByTwo gives the L2 Laplacian.
- """ - self._runAndCheckValues( - lattice_param=[[ - 1.11, 2.22, -3.22, 0.33, -0.221, 3.123, -0.477, 1.22, 3.221, 11.22, - 22.12, 33.11 - ]], - lattice_sizes=[2, 3, 2], - expected_value=2763.733801, - l2_reg=1.0) - self._runAndCheckValues( - lattice_param=[[ - 1.11, 2.22, -3.22, 0.33, -0.221, 3.123, -0.477, 1.22, 3.221, 11.22, - 22.12, 33.11 - ]], - lattice_sizes=[2, 3, 2], - expected_value=212.660846, - l2_reg=[1.0, 0.0, 0.0]) - self._runAndCheckValues( - lattice_param=[[ - 1.11, 2.22, -3.22, 0.33, -0.221, 3.123, -0.477, 1.22, 3.221, 11.22, - 22.12, 33.11 - ]], - lattice_sizes=[2, 3, 2], - expected_value=989.135355, - l2_reg=[0.0, 1.0, 0.0]) - self._runAndCheckValues( - lattice_param=[[ - 1.11, 2.22, -3.22, 0.33, -0.221, 3.123, -0.477, 1.22, 3.221, 11.22, - 22.12, 33.11 - ]], - lattice_sizes=[2, 3, 2], - expected_value=1561.9376, - l2_reg=[0.0, 0.0, 1.0]) - self._runAndCheckValues( - lattice_param=[[ - 1.11, 2.22, -3.22, 0.33, -0.221, 3.123, -0.477, 1.22, 3.221, 11.22, - 22.12, 33.11 - ], [ - -2.003, 1.221, 0.321, 0.447, 0.321, 0.446, -0.33192, 0.476, 0.8976, - -4.123, 0.487, -0.4473 - ]], - lattice_sizes=[2, 3, 2], - expected_value=2868.62393167, - l2_reg=1.0) - - def testL1AndL2Regularizers(self): - """Check l1 and l2 regularization amount.""" - self._runAndCheckValues( - lattice_param=[[ - 1.11, 2.22, -3.22, 0.33, -0.221, 3.123, -0.477, 1.22, 3.221, 11.22, - 22.12, 33.11 - ]], - lattice_sizes=[2, 3, 2], - expected_value=2931.168801, - l1_reg=1.0, - l2_reg=1.0) - self._runAndCheckValues( - lattice_param=[[ - 1.11, 2.22, -3.22, 0.33, -0.221, 3.123, -0.477, 1.22, 3.221, 11.22, - 22.12, 33.11 - ]], - lattice_sizes=[2, 3, 2], - expected_value=279.159846, - l1_reg=[0.0, 1.0, 0.0], - l2_reg=[1.0, 0.0, 0.0]) - - def testNoRegularizerExpectsNone(self): - """Set no l1_reg and l2_reg and check None regularizer.""" - lattice_param = tf.compat.v1.placeholder(dtype=tf.float32, shape=[2, 4]) - lattice_sizes = [2, 2] - self.assertEqual( - None, regularizers.lattice_regularization(lattice_param, lattice_sizes)) - - def testRank1TensorExpectsError(self): - """Pass rank-1 lattice_param tensor and check the error.""" - lattice_param = tf.compat.v1.placeholder(dtype=tf.float32, shape=[10]) - lattice_sizes = [2, 5] - with self.assertRaises(ValueError): - regularizers.lattice_regularization( - lattice_param, lattice_sizes, l1_laplacian_reg=1.0) - - def testUnknownShapeTensorExpectsError(self): - """Pass rank-2 tensor with unknown shape and check the error.""" - lattice_param = tf.compat.v1.placeholder( - dtype=tf.float32, shape=[None, None]) - lattice_sizes = [2, 2] - with self.assertRaises(ValueError): - regularizers.lattice_regularization( - lattice_param, lattice_sizes, l1_laplacian_reg=1.0) - - def testWrongL1regularizationsExpectsError(self): - # 2 x 2 lattice - lattice_param = tf.compat.v1.placeholder(dtype=tf.float32, shape=[2, 4]) - lattice_sizes = [2, 2] - # Set 3 l1_regularizations for 2d lattice. - l1_reg = [0.0, 1.0, 0.0] - with self.assertRaises(ValueError): - regularizers.lattice_regularization( - lattice_param, lattice_sizes, l1_laplacian_reg=l1_reg) - - def testWrongL2regularizationsExpectsError(self): - # 2 x 2 lattice - lattice_param = tf.compat.v1.placeholder(dtype=tf.float32, shape=[4, 2]) - lattice_sizes = [2, 2] - # Set 3 l2_regularizations for 2d lattice. 
-    l2_reg = [0.0, 1.0, 0.0]
-    with self.assertRaises(ValueError):
-      regularizers.lattice_regularization(
-          lattice_param, lattice_sizes, l2_laplacian_reg=l2_reg)
-
-
-class LatticeTorsionTestCase(tf.test.TestCase):
-
-  def _runAndCheckValues(self,
-                         lattice_param,
-                         lattice_sizes,
-                         expected_value,
-                         l1_reg=None,
-                         l2_reg=None):
-    lattice_param_tensor = tf.constant(
-        lattice_param, dtype=tf.float32)
-    reg = regularizers.lattice_regularization(
-        lattice_param_tensor,
-        lattice_sizes,
-        l1_torsion_reg=l1_reg,
-        l2_torsion_reg=l2_reg)
-    with self.session() as sess:
-      reg_value = sess.run(reg)
-    self.assertAlmostEqual(reg_value, expected_value, delta=1e-1)
-
-  def testZeroRegularizerValueForVariousLatticeRanks(self):
-    """Check zero output value for zero parameters."""
-    for lattice_rank in range(2, 10):
-      param_dim = 2**lattice_rank
-      lattice_param = [[0.0] * param_dim]
-      self._runAndCheckValues(
-          lattice_param=lattice_param,
-          lattice_sizes=[2] * lattice_rank,
-          expected_value=0.0,
-          l1_reg=1.0,
-          l2_reg=1.0)
-
-  def testL1RegularizerWithTwoByTwo(self):
-    """Check l1 regularization amount for two by two lattices.
-
-    In a 2 x 2 lattice, the L1 torsion regularizer has the following form:
-      l1_reg * abs(param[0] + param[3] - param[1] - param[2]),
-    where param is the lattice parameter tensor (assuming one output).
-    """
-    self._runAndCheckValues(
-        lattice_param=[[0.0, 0.0, 1.0, 1.0]],
-        lattice_sizes=[2, 2],
-        expected_value=0.0,
-        l1_reg=1.0)
-    self._runAndCheckValues(
-        lattice_param=[[0.0, 1.0, 1.0, 1.0]],
-        lattice_sizes=[2, 2],
-        expected_value=1.0,
-        l1_reg=1.0)
-    self._runAndCheckValues(
-        lattice_param=[[0.0, 0.0, 0.0, 1.0]],
-        lattice_sizes=[2, 2],
-        expected_value=1.0,
-        l1_reg=1.0)
-    self._runAndCheckValues(
-        lattice_param=[[0.3312, -0.3217, -0.5, 0.1]],
-        lattice_sizes=[2, 2],
-        expected_value=1.2529,
-        l1_reg=1.0)
-
-  def testL1RegularizerWithTwoByThreeByTwo(self):
-    """Check l1 regularization amount for two by three by two lattices.
-
-    In a 2 x 3 x 2 lattice, the L1 torsion regularizer has the following form:
-      l1_reg * (abs(param[0] + param[3] - param[1] - param[2])
-                + abs(param[2] + param[5] - param[3] - param[4])
-                + abs(param[6] + param[9] - param[7] - param[8])
-                + abs(param[8] + param[11] - param[9] - param[10])
-                + abs(param[0] + param[7] - param[1] - param[6])
-                + abs(param[2] + param[9] - param[3] - param[8])
-                + abs(param[4] + param[11] - param[5] - param[10])
-                + abs(param[0] + param[8] - param[2] - param[6])
-                + abs(param[2] + param[10] - param[4] - param[8])
-                + abs(param[1] + param[9] - param[3] - param[7])
-                + abs(param[3] + param[11] - param[5] - param[9])),
-
-    where param is the lattice parameter tensor (assuming one output).
-    """
-    self._runAndCheckValues(
-        lattice_param=[[
-            1.11, 2.22, -3.22, 0.33, -0.221, 3.123, -0.477, 1.22, 3.221, 11.22,
-            22.12, 33.11
-        ]],
-        lattice_sizes=[2, 3, 2],
-        expected_value=79.536,
-        l1_reg=1.0)
-    self._runAndCheckValues(
-        lattice_param=[[
-            -2.003, 1.221, 0.321, 0.447, 0.321, 0.446, -0.33192, 0.476, 0.8976,
-            -4.123, 0.487, -0.4473
-        ]],
-        lattice_sizes=[2, 3, 2],
-        expected_value=30.642580000000002,
-        l1_reg=1.0)
-    self._runAndCheckValues(
-        lattice_param=[[
-            1.11, 2.22, -3.22, 0.33, -0.221, 3.123, -0.477, 1.22, 3.221, 11.22,
-            22.12, 33.11
-        ], [
-            -2.003, 1.221, 0.321, 0.447, 0.321, 0.446, -0.33192, 0.476, 0.8976,
-            -4.123, 0.487, -0.4473
-        ]],
-        lattice_sizes=[2, 3, 2],
-        expected_value=110.17858000000001,
-        l1_reg=1.0)
-
-  def testL2RegularizerWithTwoByTwo(self):
-    """Check l2 regularization amount for two by two lattices.
-
-    In a 2 x 2 lattice, the L2 torsion regularizer has the following form:
-      l2_reg * (param[0] + param[3] - param[1] - param[2]) ** 2,
-    where param is the lattice parameter tensor (assuming one output).
-    """
-    self._runAndCheckValues(
-        lattice_param=[[0.0, 0.0, 1.0, 1.0]],
-        lattice_sizes=[2, 2],
-        expected_value=0.0,
-        l2_reg=1.0)
-    self._runAndCheckValues(
-        lattice_param=[[0.0, 1.0, 1.0, 1.0]],
-        lattice_sizes=[2, 2],
-        expected_value=1.0,
-        l2_reg=1.0)
-    self._runAndCheckValues(
-        lattice_param=[[0.0, 0.0, 0.0, 1.0]],
-        lattice_sizes=[2, 2],
-        expected_value=1.0,
-        l2_reg=1.0)
-    self._runAndCheckValues(
-        lattice_param=[[0.3312, -0.3217, -0.5, 0.1]],
-        lattice_sizes=[2, 2],
-        expected_value=1.5697584099999997,
-        l2_reg=1.0)
-
-  def testL2RegularizerWithTwoByThreeByTwo(self):
-    """Check l2 regularization amount for two by three by two lattices.
-
-    Replacing abs with square in the formula in
-    testL1RegularizerWithTwoByThreeByTwo gives the L2 torsion.
-    """
-    self._runAndCheckValues(
-        lattice_param=[[
-            1.11, 2.22, -3.22, 0.33, -0.221, 3.123, -0.477, 1.22, 3.221, 11.22,
-            22.12, 33.11
-        ]],
-        lattice_sizes=[2, 3, 2],
-        expected_value=956.5830999999998,
-        l2_reg=1.0)
-    self._runAndCheckValues(
-        lattice_param=[[
-            -2.003, 1.221, 0.321, 0.447, 0.321, 0.446, -0.33192, 0.476, 0.8976,
-            -4.123, 0.487, -0.4473
-        ]],
-        lattice_sizes=[2, 3, 2],
-        expected_value=123.2293754172,
-        l2_reg=1.0)
-    self._runAndCheckValues(
-        lattice_param=[[
-            1.11, 2.22, -3.22, 0.33, -0.221, 3.123, -0.477, 1.22, 3.221, 11.22,
-            22.12, 33.11
-        ], [
-            -2.003, 1.221, 0.321, 0.447, 0.321, 0.446, -0.33192, 0.476, 0.8976,
-            -4.123, 0.487, -0.4473
-        ]],
-        lattice_sizes=[2, 3, 2],
-        expected_value=1079.8124754172,
-        l2_reg=1.0)
-
-  def testL1andL2Regularizer(self):
-    """Check l1 and l2 regularization amount for two by two lattices."""
-    self._runAndCheckValues(
-        lattice_param=[[0.0, 0.0, 1.0, 1.0]],
-        lattice_sizes=[2, 2],
-        expected_value=0.0,
-        l1_reg=1.0,
-        l2_reg=1.0)
-    self._runAndCheckValues(
-        lattice_param=[[0.0, 1.0, 1.0, 1.0]],
-        lattice_sizes=[2, 2],
-        expected_value=2.0,
-        l1_reg=1.0,
-        l2_reg=1.0)
-    self._runAndCheckValues(
-        lattice_param=[[0.0, 0.0, 0.0, 1.0]],
-        lattice_sizes=[2, 2],
-        expected_value=2.0,
-        l1_reg=1.0,
-        l2_reg=1.0)
-    self._runAndCheckValues(
-        lattice_param=[[0.3312, -0.3217, -0.5, 0.1]],
-        lattice_sizes=[2, 2],
-        expected_value=2.82265841,
-        l1_reg=1.0,
-        l2_reg=1.0)
-
-  def testRank1TensorExpectsError(self):
-    """Pass rank-1 tensor and check the error."""
-    lattice_param = tf.compat.v1.placeholder(dtype=tf.float32, shape=[10])
-    lattice_sizes = [2, 5]
-    with self.assertRaises(ValueError):
-      regularizers.lattice_regularization(
-          lattice_param, lattice_sizes, l1_torsion_reg=1.0)
-
-  def testUnknownShapeTensorExpectsError(self):
-    """Pass rank-2 tensor with unknown shape and check the error."""
-    lattice_param = tf.compat.v1.placeholder(
-        dtype=tf.float32, shape=[None, None])
-    lattice_sizes = [2, 2]
-    with self.assertRaises(ValueError):
-      regularizers.lattice_regularization(
-          lattice_param, lattice_sizes, l1_torsion_reg=1.0)
-
-
-class LatticeRegularizersTestCase(tf.test.TestCase):
-
-  def setUp(self):
-    super(LatticeRegularizersTestCase, self).setUp()
-    self._lattice_param = [[0.3312, -0.3217, -0.5, 0.1]]
-    self._lattice_sizes = [2, 2]
-    # Regularization amounts for weight = 1.0
-    self._l1_laplacian_reg = 2.5058
-    self._l2_laplacian_reg = 1.65500274
-    self._l1_reg = 1.2529
-    self._l2_reg = 0.47318433
-    self._l1_torsion_reg = 1.2529
-    self._l2_torsion_reg = 1.5697584099999997
-
-  def _runAndCheckValues(self,
-                         lattice_param,
-                         lattice_sizes,
-                         expected_value,
-                         l1_reg=None,
-                         l2_reg=None,
-                         l1_laplacian_reg=None,
-                         l2_laplacian_reg=None,
-                         l1_torsion_reg=None,
-                         l2_torsion_reg=None):
-    lattice_param_tensor = tf.constant(
-        lattice_param, dtype=tf.float32)
-    reg = regularizers.lattice_regularization(
-        lattice_param_tensor,
-        lattice_sizes,
-        l1_reg=l1_reg,
-        l2_reg=l2_reg,
-        l1_laplacian_reg=l1_laplacian_reg,
-        l2_laplacian_reg=l2_laplacian_reg,
-        l1_torsion_reg=l1_torsion_reg,
-        l2_torsion_reg=l2_torsion_reg)
-    with self.session() as sess:
-      reg_value = sess.run(reg)
-    self.assertAlmostEqual(reg_value, expected_value, delta=1e-1)
-
-  def testL1Regularizer(self):
-    """Check l1 regularization amount."""
-    self._runAndCheckValues(
-        self._lattice_param,
-        self._lattice_sizes,
-        expected_value=self._l1_reg,
-        l1_reg=1.0)
-
-  def testL2Regularizer(self):
-    """Check l2 regularization amount."""
-    self._runAndCheckValues(
-        self._lattice_param,
-        self._lattice_sizes,
-        expected_value=self._l2_reg,
-        l2_reg=1.0)
-
-  def testL1AndL2Regularizers(self):
-    """Check l1 and l2 regularization amount."""
-    expected_value = 0.5 * self._l1_reg + 0.5 * self._l2_reg
-    self._runAndCheckValues(
-        self._lattice_param,
-        self._lattice_sizes,
-        expected_value=expected_value,
-        l1_reg=0.5,
-        l2_reg=0.5)
-
-  def testAllRegularizers(self):
-    """Check l1, l2, laplacian and torsion regularization amounts."""
-    expected_value = 0.5 * self._l1_reg
-    expected_value += 0.5 * self._l2_reg
-    expected_value += 0.5 * self._l1_laplacian_reg
-    expected_value += 0.5 * self._l2_laplacian_reg
-    expected_value += 0.5 * self._l1_torsion_reg
-    expected_value += 0.5 * self._l2_torsion_reg
-    self._runAndCheckValues(
-        self._lattice_param,
-        self._lattice_sizes,
-        expected_value=expected_value,
-        l1_reg=0.5,
-        l2_reg=0.5,
-        l1_laplacian_reg=0.5,
-        l2_laplacian_reg=0.5,
-        l1_torsion_reg=0.5,
-        l2_torsion_reg=0.5)
-
-
-class LinearRegularizersTestCase(tf.test.TestCase):
-
-  def setUp(self):
-    super(LinearRegularizersTestCase, self).setUp()
-    self._linear_param = [[0.3312, -0.3217, -0.5, 0.1]]
-    # Regularization amounts for weight = 1.0
-    self._l1_reg = 1.2529
-    self._l2_reg = 0.47318433
-
-  def _runAndCheckValues(self,
-                         linear_param,
-                         expected_value,
-                         l1_reg=None,
-                         l2_reg=None):
-    linear_param_tensor = tf.constant(linear_param, dtype=tf.float32)
-    reg = regularizers.linear_regularization(
-        linear_param_tensor, l1_reg=l1_reg, l2_reg=l2_reg)
-    with self.session() as sess:
-      reg_value = sess.run(reg)
-    self.assertAlmostEqual(reg_value, expected_value, delta=1e-1)
-
-  def testL1Regularizer(self):
-    """Check l1 regularization amount."""
-    self._runAndCheckValues(
-        self._linear_param, expected_value=self._l1_reg, l1_reg=1.0)
-
-  def testL2Regularizer(self):
-    """Check l2 regularization amount."""
-    self._runAndCheckValues(
-        self._linear_param, expected_value=self._l2_reg, l2_reg=1.0)
-
-  def testL1AndL2Regularizers(self):
-    """Check l1 and l2 regularization amount."""
-    expected_value = 0.5 * self._l1_reg + 0.5 * self._l2_reg
-    self._runAndCheckValues(
-        self._linear_param,
-        expected_value=expected_value,
-        l1_reg=0.5,
-        l2_reg=0.5)
-
-
-if __name__ == '__main__':
-  tf.test.main()
diff --git a/tensorflow_lattice/python/lib/test_data.py b/tensorflow_lattice/python/lib/test_data.py
deleted file mode 100644
index a7c94f1..0000000
--- a/tensorflow_lattice/python/lib/test_data.py
+++ /dev/null
@@ -1,194 +0,0 @@
-# Copyright 2017 The TensorFlow Lattice Authors.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Collection of test datasets.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math - -import numpy as np -import tensorflow as tf - -_NUM_EXAMPLES = 10000 -_BATCH_SIZE = 100 -_NUM_EPOCHS = 1 - - -class TestData(object): - """A test dataset class.""" - - def __init__(self, - num_examples=_NUM_EXAMPLES, - batch_size=_BATCH_SIZE, - num_epochs=_NUM_EPOCHS): - self.num_examples = num_examples - self.batch_size = batch_size - self.num_epochs = num_epochs - - # Collection of transformations that generates the label, y. - def _f(self, x): - return np.power(x, 3) + 0.1 * np.sin(x * math.pi * 8) - - def _g(self, x0, x1): - return self._f(x0) + 0.3 * (1.0 - np.square(x1)) - - def _h(self, x0, x1): - radius2 = (x0 * x0 + x1 * x1) - max_radius2 = 0.25 - return radius2 < max_radius2 - - def _i(self, x0, x1, x2): - return self._g(x0, x1) + np.choose(x2.astype(int) + 1, [11., 7., 13.]) - - def oned_input_fn(self): - """Returns an input function for one dimensional learning task. - - column 'x' is a feature column, and column 'y' is a label column. - The transformation is deterministic, where y = _f(x). - - Returns: - Function, that has signature of ()->({'x': data}, `target`) - - FutureWork: Make this use keypoints_initialization from quantiles. - """ - x = np.random.uniform(-1.0, 1.0, size=self.num_examples) - y = self._f(x) - return tf.compat.v1.estimator.inputs.numpy_input_fn( - x={'x': x}, - y=y, - batch_size=self.batch_size, - num_epochs=self.num_epochs, - shuffle=False) - - def oned_zero_weight_input_fn(self): - """Returns an input function for one dimensional learning task. - - column 'x' is a feature column, column 'zero' is a numerical column that - contains zero values and column 'y' is a label column. - The transformation is deterministic, where y = _f(x). - - Returns: - Function, that has signature of ()->({'x': data, 'zero': zeros}, `target`) - """ - x = np.random.uniform(-1.0, 1.0, size=self.num_examples) - zeros = np.zeros(shape=(self.num_examples)) - y = self._f(x) - return tf.compat.v1.estimator.inputs.numpy_input_fn( - x={ - 'x': x, - 'zero': zeros - }, - y=y, - batch_size=self.batch_size, - num_epochs=self.num_epochs, - shuffle=False) - - def twod_input_fn(self): - """Returns an input function for two dimensional learning task. - - column 'x0' and 'x1' are feature columns, and column 'y' is a label column. - The transformation is deterministic, where y = _g(x0, x1). 
- - Returns: - Function, that has signature of ()->({'x0': data, 'x1': data}, `target`) - """ - x0 = np.random.uniform(-1.0, 1.0, size=self.num_examples) - x1 = np.random.uniform(-1.0, 1.0, size=self.num_examples) - y = self._g(x0, x1) - - return tf.compat.v1.estimator.inputs.numpy_input_fn( - x={'x0': x0, - 'x1': x1}, - y=y, - batch_size=self.batch_size, - num_epochs=self.num_epochs, - shuffle=False) - - def twod_classificer_input_fn(self): - """Returns an input function for two dimensional classification task. - - column 'x0' and 'x1' are feature columns, and column 'y' is a label column. - The transformation is deterministic, where y = _h(x0, x1). - - Returns: - Function, that has signature of ()->({'x0': data, 'x1': data}, `target`) - """ - x0 = np.random.uniform(-1.0, 1.0, size=self.num_examples) - x1 = np.random.uniform(-1.0, 1.0, size=self.num_examples) - y = np.vectorize(self._h)(x0, x1) - - return tf.compat.v1.estimator.inputs.numpy_input_fn( - x={'x0': x0, - 'x1': x1}, - y=y, - batch_size=self.batch_size, - num_epochs=self.num_epochs, - shuffle=False) - - def threed_input_fn(self, full_data, num_epochs=None): - """Returns an input function for three-dimensional learning task. - - 'x0' and 'x1' are numeric, and 'x2' is categorical with values {-1, 0, 1}. - The transformation is deterministic and decomposable on the inputs, - that is y = _i(x0, x1, x2) = _i_0(x0)+_i_1(x1)+_i_2(x2). - - Args: - full_data: if set to true the whole data is returned in one batch. - num_epochs: number of epochs to go over the data. Takes default used - in construction if not set. - - Returns: - Function, that has signature of - ()->({'x0': data, 'x1': data, 'x2': data}, `target`) - """ - x0 = np.random.uniform(-1.0, 1.0, size=self.num_examples) - x1 = np.random.uniform(-1.0, 1.0, size=self.num_examples) - x2 = np.random.choice( - [-1., 0., 1.], size=self.num_examples, replace=True, p=[0.1, 0.7, 0.2]) - y = self._i(x0, x1, x2) - - x2_str = np.choose(x2.astype(int) + 1, ['?', 'N', 'Y']) - if num_epochs is None: - num_epochs = self.num_epochs - return tf.compat.v1.estimator.inputs.numpy_input_fn( - x={'x0': x0, - 'x1': x1, - 'x2': x2_str}, - y=y, - batch_size=self.batch_size if not full_data else self.num_examples, - num_epochs=num_epochs, - shuffle=False) - - def multid_feature_input_fn(self): - """Returns an input function with one multi-dimensional feature. - - column 'x' is the feature column, and column 'y' is a label column. - The transformation is deterministic, where y = _g(x[0], x[1]). - - Returns: - Function, that has signature of ()->({'x': data}, `target`) - """ - x = np.random.uniform(-1.0, 1.0, size=[self.num_examples, 2]) - x_split = np.split(x, 2, axis=1) - y = self._g(x_split[0], x_split[1]) - - return tf.compat.v1.estimator.inputs.numpy_input_fn( - x={'x': x}, - y=y, - batch_size=self.batch_size, - num_epochs=self.num_epochs, - shuffle=False) diff --git a/tensorflow_lattice/python/lib/tools.py b/tensorflow_lattice/python/lib/tools.py deleted file mode 100644 index 5087467..0000000 --- a/tensorflow_lattice/python/lib/tools.py +++ /dev/null @@ -1,421 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Library of internal functions used by TensorFlow Lattice modules."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import datetime
-import os
-import socket
-import time
-import traceback
-
-# Dependency imports
-import tensorflow as tf
-
-from tensorflow.python.feature_column import feature_column as feature_column_lib  # pylint: disable=g-direct-tensorflow-import
-from tensorflow.python.lib.io import file_io  # pylint: disable=g-direct-tensorflow-import
-
-# Name used as a default for all per-feature configurations, see
-# cast_to_dict.
-DEFAULT_NAME = 'tensorflow_lattice_internal_default'
-
-
-def cast_to_scalar_tensor_of_dtype(t, dtype):
-  """If not yet a tensor, casts it to a constant scalar tensor."""
-  if issubclass(type(t), tf.Tensor):
-    return t
-  return tf.constant(t, shape=[], dtype=dtype)
-
-
-def cast_to_list(v, n, param_name):
-  if isinstance(v, list):
-    if len(v) != n:
-      raise ValueError('List given to %s has %d values, but we need %d' %
-                       (param_name, len(v), n))
-    return v
-  return [v] * n
-
-
-def cast_to_dict(v, feature_names, param_name):
-  """If value is not yet a dict, cast it to a dict of all feature names to values.
-
-  Args:
-    v: can be a dict or a value. If a dict, missing feature names are set to
-      the value of v[DEFAULT_NAME] -- an exception is raised if some feature
-      name is not found and v[DEFAULT_NAME] is not set.
-    feature_names: list of feature names that must be present in the returned
-      dict.
-    param_name: name shown in case of error, if value is not set for some
-      feature.
-
-  Returns:
-    If value is not a dict, a new dict with the same value repeated for all
-    feature names.
-
-    If value is a dict, returns a new dict with the values copied, or if not
-    present, copied from v[DEFAULT_NAME].
-
-  Raises:
-    ValueError: if a value is not set for a feature in feature_names, and no
-      default value is set.
-  """
-  if isinstance(v, dict):
-    v_copy = {}
-    for feature_name in feature_names:
-      if feature_name in v:
-        v_copy[feature_name] = v[feature_name]
-      else:
-        if DEFAULT_NAME in v:
-          v_copy[feature_name] = v[DEFAULT_NAME]
-        else:
-          raise ValueError(
-              'Dict given for %s does not contain definition for feature '
-              '"%s"' % (param_name, feature_name))
-    return v_copy
-  return {feature_name: v for feature_name in feature_names}
-
-
-def cast_to_dict_of_tensor_scalars(v, feature_names, dtype, param_name):
-  """Cast value to a dict mapping feature names to tensor scalars."""
-  if isinstance(v, dict):
-    # Convert each value to scalar.
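-    # Features missing from v fall back to v[DEFAULT_NAME], mirroring
-    # cast_to_dict; a ValueError is raised when neither is present.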
- res = {} - for feature_name in feature_names: - if feature_name in v: - res[feature_name] = cast_to_scalar_tensor_of_dtype( - v[feature_name], dtype) - else: - if DEFAULT_NAME in v: - res[feature_name] = cast_to_scalar_tensor_of_dtype( - v[DEFAULT_NAME], dtype) - else: - raise ValueError( - 'Dict given for %s does not contain definition for feature ' - '"%s"' % (param_name, feature_name)) - return res - - v = cast_to_scalar_tensor_of_dtype(v, dtype) - return {feature_name: v for feature_name in feature_names} - - -def input_from_feature_column(columns_to_tensors, - feature_column, - dtype=tf.float32): - """Convert one feature_column to `Tensor`, making necessary transformations. - - DenseColumns are taken as is, see `tf.feature_column.input_layer`. - CategoricalColumns are assumed to be exclusive and it takes only the value - of the category. - - Args: - columns_to_tensors: Returned by input_fn. Consider processing first by - `layers.transform_features(columns_to_tensors, feature_columns))`, since - it may share tf ops for different FeatureColumns. This function transforms - one at a time. - feature_column: feature_column to transform to `Tensor`. - dtype: `_CategoricalColumn`s are converted to this type. - - Returns: - Tensor with transformed feature column for calibration consumption. - - Raises: - ValueError: if type of FeatureColumn is unknown, and this function doesn't - know how to handle it. - """ - # pylint: disable=protected-access - if isinstance(feature_column, feature_column_lib._DenseColumn): - return feature_column_lib.input_layer( - features=columns_to_tensors, feature_columns=set([feature_column])) - elif isinstance(feature_column, feature_column_lib._CategoricalColumn): - categorical_ids = tf.cast( - feature_column._transform_feature(columns_to_tensors).values, dtype) - return tf.stack([categorical_ids], axis=1) - # pylint: enable=protected-access - raise ValueError('Cannot handle FeatureColumn {}: only _DenseColumn and ' - '_CategoricalColumn are implemented, consider converting ' - 'your column to float32 until this FeatureColumn is ' - 'supported'.format(feature_column)) - - -def get_sorted_feature_columns(feature_columns): - """Sorts an iterable of feature columns by their names in ascending order. - - Args: - feature_columns: An iterable that yields instances of a tensorflow - FeatureColumn. - - Returns: - A copy of the input sorted by name in ascending order. - """ - return sorted(feature_columns, key=lambda fc: fc.name) - - -def get_sorted_feature_names(columns_to_tensors, feature_columns=None): - """List feature names: from feature_columns or columns_to_tensors. - - This function will return the list of feature names for layers or Estimators - that use either feature_columns or directly the inputs returned by an - input_fn. - - Args: - columns_to_tensors: str-->tf.Tensor dict. A mapping from feature name to - tensors. - feature_columns: Optional set containing all the feature columns. If not set - it is assumed all features come from columns_to_tensors. Otherwise this - defines the list of features to use. All items in the set should be - instances of classes derived by FeatureColumn. - - Returns: - List of feature names. 
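-    Names are sorted in ascending order in either case.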
- """ - if feature_columns is not None: - return [f_col.name for f_col in get_sorted_feature_columns(feature_columns)] - return [k for k in sorted(columns_to_tensors.keys())] - - -def assert_shape(tensor, expected_shape, tensor_name): - """Assert shapes that must be known in graph construction time.""" - got_shape = tensor.shape.as_list() - if got_shape != expected_shape: - raise ValueError('Invalid shape for %s: got %s, expected %s' % - (tensor_name, got_shape, expected_shape)) - - -def add_if_not_none(a, b): - """Returns a/b is one of them is None, or their sum if both are not None.""" - if a is None: - return b - if b is None: - return a - return a + b - - - -class LatticeStructure(object): - """Lattice structure class. - - This class represents lattice vertices in a column-major order indexing that - are used in C++ lattice operators. - - With the column-major indexing, the lattice with lattice_sizes - [m_0, m_1, ..., m_{n-1}] will have: - dimension: n - number of vertices: m_0 * ... * m_{n-1} - number of vertices in each cell: 2 ** (n-1) - stride[0] = 1 - stride[1] = 1 * m_{0} - ... - stride[n-1] = 1 * m_{n-2} ... * m_0 - - LatticeStructure keeps a reference copy of lattice_sizes, so if any of element - in lattice_sizes changes, this structure's output is not useful anymore. - - """ - - def __init__(self, lattice_sizes): - """Initialize lattice structure. - - Args: - lattice_sizes: (list of ints) constains lattice size of each dimension. - - Raises: - ValueError: If any element of lattice_sizes is less than 2. - """ - - # This is a reference copy. - self.lattice_sizes = lattice_sizes - self.dimension = len(lattice_sizes) - self.num_vertices_per_cell = 2**self.dimension - self.num_vertices = 1 - self.strides = [] - for lattice_size in lattice_sizes: - self.strides.append(self.num_vertices) - if lattice_size < 2: - raise ValueError( - 'Lattice size cannot be less than 2, but one (or more) of ' - 'lattice_size is less than 2', lattice_sizes) - self.num_vertices *= lattice_size - - -def lattice_indices_generator(lattice_structure): - """lattice_indices_generator iterators all vertices in a multi-cell lattice. - - It returns a global index and per-dimension index. So a lattice of sizes - [2,3] would yield the sequence: - - (0, [0, 0]) - (1, [1, 0]) - (2, [0, 1]) - (3, [1, 1]) - (4, [0, 2]) - (5, [1, 2]) - - The access order is in the column-major order, that is consistent with C++ - lattice operators indexing convention. - - Example usage: - for (index, per_dim_index) in lattice_indices_generator(lattice structure): - flat_index = index - first_dim_index = per_dim_index[0] - - Args: - lattice_structure: (LatticeStructure) lattice structure to be used. - - Yields: - (flat_index, per_dim_indices) - """ - per_dim_indices = [0] * lattice_structure.dimension - - for flat_index in range(lattice_structure.num_vertices): - yield (flat_index, per_dim_indices) - for dim in range(lattice_structure.dimension): - per_dim_indices[dim] += 1 - if per_dim_indices[dim] == lattice_structure.lattice_sizes[dim]: - per_dim_indices[dim] = 0 - else: - break - - -def lattice_1d_slice(lattice_param_tensor, lattice_sizes, lattice_axis, begin, - size): - """Produce 1d slice of lattice param. - - Suppose we have d dimensional lattices. Recall that lattice_param_tensor - is a 2d tensor, where the first dimension is output_dim, and the second - dimension is a flattened version of lattice parameters. 
- - This function interprets lattice_param_tensor as (d + 1) dimensional tensor - of the form: - lattice_param[output_dim, vertex[0], vertex[1], ..., vertex[d - 1]] - and returns the flattened (2d) representation of - lattice_param[output_dim, :, :, ..., begin : begin + size, :, ..., :] - where the slicing happens at lattice_axis. - - Args: - lattice_param_tensor: [output_dim, param_dim] tensor contains lattice - parameters in the column-major order. - lattice_sizes: (list of ints) lattice size of each dimension. - lattice_axis: (int) axis slice. - begin: (int) slice beginning index at lattice_axis. - size: (int) slice size along the axis slice. - - Returns: - [output_dim, sliced_param_dim] tensor that contains sliced lattice params. - - Raises: - ValueError: * If lattice_param's shape is not a 2d tensor. - * If lattice_axis is not in [0, len(lattice_sizes) - 1]. - * If [begin, begin + size] is not a subset of - [0, lattice_sizes[lattice_axis] - 1] - """ - lattice_rank = len(lattice_sizes) - param_shape = lattice_param_tensor.shape.as_list() - if len(param_shape) != 2: - raise ValueError('Expect 2d tensor, but got %s' % param_shape) - if lattice_axis < 0 or lattice_axis >= lattice_rank: - raise ValueError('lattice_axis (%d) is out of range' % lattice_axis) - if begin < 0 or (begin + size) > lattice_sizes[lattice_axis]: - raise ValueError( - '[begin, begin + size] ([%d, %d]) is out of range [0, %d]' % - (begin, begin + size, lattice_sizes[lattice_axis])) - - output_dim = param_shape[0] - - pre_axis_param_dim = 1 - for index in range(0, lattice_axis): - pre_axis_param_dim *= lattice_sizes[index] - post_axis_param_dim = 1 - for index in range(lattice_axis + 1, lattice_rank): - post_axis_param_dim *= lattice_sizes[index] - - # Lattice param in each output dimension is in the column-major order, but - # tf.reshape works in the row-major order. So we put post_axis_param_dim - # first, and then pre_axis_param_dim. - target_shape = [ - output_dim, post_axis_param_dim, lattice_sizes[lattice_axis], - pre_axis_param_dim - ] - # reshape param to [output_dim, :, target_axis, :]. - reshaped_param = tf.reshape(lattice_param_tensor, shape=target_shape) - sliced_param = tf.slice( - reshaped_param, begin=[0, 0, begin, 0], size=[-1, -1, size, -1]) - final_slice = tf.reshape(sliced_param, shape=[output_dim, -1]) - - return final_slice - - -class SaveOnceOrWaitTimeOutError(Exception): - pass - - -def save_once_or_wait_for_chief(write_fn, - metadata_dir, - is_chief, - timeout_secs=600): - """Synchronizes saving data to disk across multiple tensorflow processes. - - This function can be used for synchronizing creation of data on disk that - needs to be available to all processes in a Tensorflow cluster. Each process - should call this function prior to using the data. The function makes the - designated chief process write the data and every other process blocks until - the data has been written. - - Args: - write_fn: A function taking no arguments that executes the write to disk. - metadata_dir: A path on the filesystem used for storing internal data used - in this function (currently, a "done" sentinal file). If this directory - doesn't exist it would be created; otherwise it should be writeable. - is_chief: Whether the current process is the designated chief. Only one - process should pass this as "True". - timeout_secs: The (approximate) time in seconds a non-chief process should - wait for the data to be created. 
- - Raises: - SaveOnceOrWaitTimeOutError if this is a non-chief process and the data has - not been created by the chief after timeout_secs seconds. - """ - done_file = os.path.join(metadata_dir, '__tensorflow_lattice__done') - if not is_chief: - _poll_for_file(done_file, timeout_secs) - return - - if file_io.file_exists(done_file): - return - - write_fn() - - # Create an empty done file. - file_io.recursive_create_dir(metadata_dir) - file_io.write_string_to_file( - done_file, 'Time created [UTC]: %s' - '\nHostname: %s' - '\nProcess id: %s' - '\nTraceback:\n%s' % (datetime.datetime.utcnow(), socket.gethostname(), - os.getpid(), '\n'.join(traceback.format_stack()))) - - -POLL_INTERVAL_SECS = 30 - - -def _poll_for_file(filename, timeout_secs): - start = time.time() - while not file_io.file_exists(filename): - time.sleep(POLL_INTERVAL_SECS) - if time.time() - start > timeout_secs: - raise SaveOnceOrWaitTimeOutError('Waiting for file %s timed-out' % - filename) diff --git a/tensorflow_lattice/python/lib/tools_test.py b/tensorflow_lattice/python/lib/tools_test.py deleted file mode 100644 index be02e21..0000000 --- a/tensorflow_lattice/python/lib/tools_test.py +++ /dev/null @@ -1,360 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for TensorFlow Lattice's tools module.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf - -from tensorflow_lattice.python.lib import test_data -from tensorflow_lattice.python.lib import tools - -_NUM_EXAMPLES = 10 - - -class ToolsTestCase(tf.test.TestCase): - - def setUp(self): - super(ToolsTestCase, self).setUp() - self._test_data = test_data.TestData(num_examples=_NUM_EXAMPLES) - - def testCastToDict(self): - names = ['a', 'b', 'c'] - got = tools.cast_to_dict(1.0, names, 'blah') - self.assertEqual(got['a'], 1.0) - self.assertEqual(got['b'], 1.0) - self.assertEqual(got['c'], 1.0) - self.assertItemsEqual(got.keys(), names) - - got = tools.cast_to_dict({'a': 1.0, 'b': 2.0, 'c': 3.0}, names, 'blah') - self.assertEqual(got['a'], 1.0) - self.assertEqual(got['b'], 2.0) - self.assertEqual(got['c'], 3.0) - self.assertItemsEqual(got.keys(), names) - - with self.assertRaisesRegexp( - ValueError, - 'Dict given for blah does not contain definition for feature "c"'): - got = tools.cast_to_dict({'a': 1.0, 'b': 2.0}, names, 'blah') - - got = tools.cast_to_dict({'a': 1.0, tools.DEFAULT_NAME: 2.0}, names, 'blah') - self.assertItemsEqual(got.keys(), names) - self.assertEqual(got['a'], 1.0) - self.assertEqual(got['b'], 2.0) - self.assertEqual(got['c'], 2.0) - - def testCastToDictOfTensorScalars(self): - # Same value for all names. 
- names = ['a', 'b', 'c'] - value = np.array(1.0) - got = tools.cast_to_dict_of_tensor_scalars(value, ['a', 'b', 'c'], - tf.float32, 't1') - self.assertItemsEqual(got.keys(), names) - self.assertEqual(got['a'], got['b']) - self.assertEqual(got['b'], got['c']) - self.assertIsInstance(got['a'], tf.Tensor) - self.assertShapeEqual(value, got['a']) - - # Raises for missing names. - with self.assertRaisesRegexp( - ValueError, - 'Dict given for t2 does not contain definition for feature "c"'): - got = tools.cast_to_dict_of_tensor_scalars({ - 'a': value, - 'b': value - }, ['a', 'b', 'c'], tf.float32, 't2') - - # Uses default value - default_value = np.array(2.0) - got = tools.cast_to_dict_of_tensor_scalars( - { - 'a': value, - 'b': value, - tools.DEFAULT_NAME: default_value, - }, names, tf.float32, 't2') - self.assertItemsEqual(got.keys(), names) - - def testInputFromFeatureColumn(self): - # Tests 1-dimension real valued feature. - x = np.random.uniform(-1.0, 1.0, size=[self._test_data.num_examples]) - feature_column = tf.feature_column.numeric_column('x') - # Notice that 1-dimension features [batch_size] are packaged into a 2-dim - # tensor: [batch_size, 1] - materialized = self._materialize_feature_column(feature_column, x) - self.assertEqual(materialized.shape, (self._test_data.num_examples, 1)) - materialized = materialized[:, 0] - self.assertTrue( - self._np_array_close(x, materialized), - 'expected:{} != got:{}'.format(x, materialized)) - - # Tests that 2-dimensional real valued feature. - x = np.random.uniform(-1.0, 1.0, size=[self._test_data.num_examples, 2]) - feature_column = tf.feature_column.numeric_column('x', shape=(2,)) - materialized = self._materialize_feature_column(feature_column, x) - self.assertTrue( - self._np_array_close(x, materialized), - 'expected:{} != got:{}'.format(x, materialized)) - - # Tests that categorical feature is correctly converted. - x = np.array(['Y', 'N', '?', 'Y', 'Y', 'N']) - expect = np.array([0., 1., -1., 0., 0., 1.]) - feature_column = tf.feature_column.categorical_column_with_vocabulary_list( - 'x', ['Y', 'N']) - materialized = self._materialize_feature_column(feature_column, x)[:, 0] - self.assertTrue( - self._np_array_close(expect, materialized), - 'expect:{} != got:{}'.format(expect, materialized)) - - def testSaveOnceOrWaitForChief(self): - write_fn = tf.compat.v1.test.mock.Mock() - tools.save_once_or_wait_for_chief( - write_fn, self.get_temp_dir(), is_chief=True) - write_fn.assert_called_once_with() - write_fn.reset_mock() - write_fn.assert_not_called() - tools.save_once_or_wait_for_chief( - write_fn, self.get_temp_dir(), is_chief=True) - write_fn.assert_not_called() - tools.save_once_or_wait_for_chief( - write_fn, self.get_temp_dir(), is_chief=False) - write_fn.assert_not_called() - - @tf.compat.v1.test.mock.patch('time.time') - def testSaveOnceOrWaitForChief_Timeout(self, mock_time): - write_fn = tf.compat.v1.test.mock.Mock() - # Return 0 on the first call to 'time.time' and 1000 on the second. 
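-    # The second call makes the measured elapsed time 1000 seconds, which
-    # exceeds timeout_secs=999 and triggers SaveOnceOrWaitTimeOutError.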
- mock_time.side_effect = [0, 1000] - self.assertRaises( - tools.SaveOnceOrWaitTimeOutError, - tools.save_once_or_wait_for_chief, - write_fn, - self.get_temp_dir(), - is_chief=False, - timeout_secs=999) - call = tf.compat.v1.test.mock.call - self.assertEqual(mock_time.mock_calls, [call(), call()]) - - def _np_array_close(self, a, b): - return np.alltrue(np.isclose(a, b)) - - def _materialize_feature_column(self, feature_column, x): - """Creates input_fn with x then transform and materialize feature_column.""" - input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn( - x={'x': x}, - y=None, - batch_size=self._test_data.num_examples, - num_epochs=1, - shuffle=False) - with tf.Graph().as_default(): - features = input_fn() - input_tensor = tools.input_from_feature_column(features, feature_column) - materialized = self._materialize_locally(input_tensor) - return materialized - - def _materialize_locally(self, tensors, feed_dict=None): - with tf.compat.v1.train.SingularMonitoredSession() as sess: - materialized = sess.run(tensors, feed_dict=feed_dict) - return materialized - - -class LatticeToolsTestCase(tf.test.TestCase): - - def _runIterAndCheck(self, lattice_sizes, expected_vertices): - # Running iterator, and check the returned vertices with expected_vertices. - lattice_structure = tools.LatticeStructure(lattice_sizes) - for (index, vertices) in tools.lattice_indices_generator(lattice_structure): - self.assertItemsEqual(vertices, expected_vertices[index]) - - def testTwoByThreeLatticeIteration(self): - lattice_sizes = [2, 3] - expected_vertices = [[0, 0], [1, 0], [0, 1], [1, 1], [0, 2], [1, 2]] - self._runIterAndCheck(lattice_sizes, expected_vertices) - - def testThreeByTwoByTwoIteration(self): - lattice_sizes = [3, 2, 2] - expected_vertices = [[0, 0, 0], [1, 0, 0], [2, 0, 0], [0, 1, 0], [1, 1, 0], - [2, 1, 0], [0, 0, 1], [1, 0, 1], [2, 0, 1], [0, 1, 1], - [1, 1, 1], [2, 1, 1]] - self._runIterAndCheck(lattice_sizes, expected_vertices) - - def testWrongLatticeSizeExpectsError(self): - with self.assertRaises(ValueError): - _ = tools.LatticeStructure([1, 1]) - - -class Lattice1DSliceTestCase(tf.test.TestCase): - - def _runAndCheckValues(self, slice_lattice_param_tensor, expected_value): - with self.session() as sess: - slice_lattice_param_value = sess.run(slice_lattice_param_tensor) - self.assertAllClose(slice_lattice_param_value, expected_value) - - def testTwodLatticeSlice(self): - lattice_sizes = [2, 3] - # param[0][0] = 0 - # param[1][0] = 1 - # param[0][1] = 2 - # param[1][1] = 3 - # param[0][2] = 4 - # param[1][2] = 5 - lattice_param_tensor = tf.constant([list(range(2 * 3))]) - # param[0][:] = [0, 2, 4] - param_0_x = tools.lattice_1d_slice( - lattice_param_tensor, lattice_sizes, lattice_axis=0, begin=0, size=1) - self._runAndCheckValues(param_0_x, expected_value=[[0, 2, 4]]) - # param[1][:] = [1, 3, 5] - param_1_x = tools.lattice_1d_slice( - lattice_param_tensor, lattice_sizes, lattice_axis=0, begin=1, size=1) - self._runAndCheckValues(param_1_x, expected_value=[[1, 3, 5]]) - # param[:][0] = [0, 1] - param_x_0 = tools.lattice_1d_slice( - lattice_param_tensor, lattice_sizes, lattice_axis=1, begin=0, size=1) - self._runAndCheckValues(param_x_0, expected_value=[[0, 1]]) - # param[:][1] = [2, 3] - param_x_1 = tools.lattice_1d_slice( - lattice_param_tensor, lattice_sizes, lattice_axis=1, begin=1, size=1) - self._runAndCheckValues(param_x_1, expected_value=[[2, 3]]) - # param[:][2] = [4, 5] - param_x_2 = tools.lattice_1d_slice( - lattice_param_tensor, lattice_sizes, lattice_axis=1, begin=2, 
size=1) - self._runAndCheckValues(param_x_2, expected_value=[[4, 5]]) - # param[:][0:1] = [0, 1, 2, 3] - param_x_01 = tools.lattice_1d_slice( - lattice_param_tensor, lattice_sizes, lattice_axis=1, begin=0, size=2) - self._runAndCheckValues(param_x_01, expected_value=[[0, 1, 2, 3]]) - # param[:][1:2] = [2, 3, 4, 5] - param_x_12 = tools.lattice_1d_slice( - lattice_param_tensor, lattice_sizes, lattice_axis=1, begin=1, size=2) - self._runAndCheckValues(param_x_12, expected_value=[[2, 3, 4, 5]]) - - def testTwodMultiOutputLatticeSlice(self): - lattice_sizes = [2, 2] - # first_param[0][0] = 0 - # first_param[1][0] = 1 - # first_param[0][1] = 2 - # first_param[1][1] = 3 - # second_param[0][0] = 3 - # second_param[1][0] = 2 - # second_param[0][1] = 1 - # second_param[1][1] = 0 - lattice_param_tensor = tf.constant( - [list(range(2 * 2)), list(range(2 * 2 - 1, -1, -1))]) - # param[0][:] = [[0, 2], [3, 1]] - param_0_x = tools.lattice_1d_slice( - lattice_param_tensor, lattice_sizes, lattice_axis=0, begin=0, size=1) - self._runAndCheckValues(param_0_x, expected_value=[[0, 2], [3, 1]]) - # param[1][:] = [[1, 3], [2, 0]] - param_1_x = tools.lattice_1d_slice( - lattice_param_tensor, lattice_sizes, lattice_axis=0, begin=1, size=1) - self._runAndCheckValues(param_1_x, expected_value=[[1, 3], [2, 0]]) - # param[:][0] = [[0, 1], [3, 2]] - param_x_0 = tools.lattice_1d_slice( - lattice_param_tensor, lattice_sizes, lattice_axis=1, begin=0, size=1) - self._runAndCheckValues(param_x_0, expected_value=[[0, 1], [3, 2]]) - # param[:][1] = [[2, 3], [1, 0]] - param_x_1 = tools.lattice_1d_slice( - lattice_param_tensor, lattice_sizes, lattice_axis=1, begin=1, size=1) - self._runAndCheckValues(param_x_1, expected_value=[[2, 3], [1, 0]]) - - def testThreedLatticeSlice(self): - lattice_sizes = [2, 3, 2] - # param[0][0][0] = 0 - # param[1][0][0] = 1 - # param[0][1][0] = 2 - # param[1][1][0] = 3 - # param[0][2][0] = 4 - # param[1][2][0] = 5 - # param[0][0][1] = 6 - # param[1][0][1] = 7 - # param[0][1][1] = 8 - # param[1][1][1] = 9 - # param[0][2][1] = 10 - # param[1][2][1] = 11 - lattice_param_tensor = tf.constant([list(range(2 * 3 * 2))]) - # param[0][:][:] = [0, 2, 4, 6, 8, 10] - param_0_x_x = tools.lattice_1d_slice( - lattice_param_tensor, lattice_sizes, lattice_axis=0, begin=0, size=1) - self._runAndCheckValues(param_0_x_x, expected_value=[[0, 2, 4, 6, 8, 10]]) - # param[1][:][:] = [1, 3, 5, 7, 9, 11] - param_1_x_x = tools.lattice_1d_slice( - lattice_param_tensor, lattice_sizes, lattice_axis=0, begin=1, size=1) - self._runAndCheckValues(param_1_x_x, expected_value=[[1, 3, 5, 7, 9, 11]]) - # param[:][0][:] = [0, 1, 6, 7] - param_x_0_x = tools.lattice_1d_slice( - lattice_param_tensor, lattice_sizes, lattice_axis=1, begin=0, size=1) - self._runAndCheckValues(param_x_0_x, expected_value=[[0, 1, 6, 7]]) - # param[:][1][:] = [2, 3, 8, 9] - param_x_1_x = tools.lattice_1d_slice( - lattice_param_tensor, lattice_sizes, lattice_axis=1, begin=1, size=1) - self._runAndCheckValues(param_x_1_x, expected_value=[[2, 3, 8, 9]]) - # param[:][2][:] = [4, 5, 10, 11] - param_x_2_x = tools.lattice_1d_slice( - lattice_param_tensor, lattice_sizes, lattice_axis=1, begin=2, size=1) - self._runAndCheckValues(param_x_2_x, expected_value=[[4, 5, 10, 11]]) - # param[:][0:1][:] = [0, 1, 2, 3, 6, 7, 8, 9] - param_x_01_x = tools.lattice_1d_slice( - lattice_param_tensor, lattice_sizes, lattice_axis=1, begin=0, size=2) - self._runAndCheckValues( - param_x_01_x, expected_value=[[0, 1, 2, 3, 6, 7, 8, 9]]) - # param[:][1:2][:] = [2, 3, 4, 5, 8, 9, 10, 11] - 
param_x_12_x = tools.lattice_1d_slice(
-        lattice_param_tensor, lattice_sizes, lattice_axis=1, begin=1, size=2)
-    self._runAndCheckValues(
-        param_x_12_x, expected_value=[[2, 3, 4, 5, 8, 9, 10, 11]])
-    # param[:][:][0] = [0, 1, 2, 3, 4, 5]
-    param_x_x_0 = tools.lattice_1d_slice(
-        lattice_param_tensor, lattice_sizes, lattice_axis=2, begin=0, size=1)
-    self._runAndCheckValues(param_x_x_0, expected_value=[[0, 1, 2, 3, 4, 5]])
-    # param[:][:][1] = [6, 7, 8, 9, 10, 11]
-    param_x_x_1 = tools.lattice_1d_slice(
-        lattice_param_tensor, lattice_sizes, lattice_axis=2, begin=1, size=1)
-    self._runAndCheckValues(param_x_x_1, expected_value=[[6, 7, 8, 9, 10, 11]])
-
-  def testWrongTensorShapeExpectsError(self):
-    lattice_param_tensor = tf.compat.v1.placeholder(shape=(2, 2, 2), dtype=tf.float32)
-    with self.assertRaises(ValueError):
-      _ = tools.lattice_1d_slice(
-          lattice_param_tensor,
-          lattice_sizes=[2],
-          lattice_axis=0,
-          begin=0,
-          size=1)
-
-  def testOutOfRangeAxisExpectsError(self):
-    lattice_param_tensor = tf.compat.v1.placeholder(shape=(2, 4), dtype=tf.float32)
-    with self.assertRaises(ValueError):
-      _ = tools.lattice_1d_slice(
-          lattice_param_tensor,
-          lattice_sizes=[2, 2],
-          lattice_axis=3,
-          begin=0,
-          size=1)
-
-  def testBeginSizeOutOfRangeExpectsError(self):
-    lattice_param_tensor = tf.compat.v1.placeholder(shape=(2, 4), dtype=tf.float32)
-    with self.assertRaises(ValueError):
-      _ = tools.lattice_1d_slice(
-          lattice_param_tensor,
-          lattice_sizes=[2, 2],
-          lattice_axis=0,
-          begin=1,
-          size=2)
-
-
-if __name__ == '__main__':
-  tf.test.main()
diff --git a/tensorflow_lattice/python/linear_layer.py b/tensorflow_lattice/python/linear_layer.py
new file mode 100644
index 0000000..47baa60
--- /dev/null
+++ b/tensorflow_lattice/python/linear_layer.py
@@ -0,0 +1,331 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Layer which represents a linear function. See class level comment.
+
+This layer applies a linear transformation to the input tensor with an
+optional bias term. It supports monotonicity, monotonic dominance and
+fixed-norm constraints.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from . import linear_lib
+import tensorflow as tf
+from tensorflow import keras
+
+LINEAR_LAYER_KERNEL_NAME = "linear_layer_kernel"
+LINEAR_LAYER_BIAS_NAME = "linear_layer_bias"
+
+
+class Linear(keras.layers.Layer):
+  # pyformat: disable
+  """Layer which represents a linear function.
+
+  Monotonicity can be specified for any input dimension, in which case the
+  learned weight for that dimension is guaranteed to be non-negative for
+  increasing or non-positive for decreasing monotonicity.
+
+  Monotonic dominance can be specified for any pair of dimensions, referred
+  to as the *dominant* and *weak* dimensions, such that the effect (slope) in
+  the direction of the *dominant* dimension is guaranteed to be greater than
+  that of the *weak* dimension at any point. Both dominant and weak
+  dimensions must be increasing.
+
+  Weights can be constrained to have a fixed norm.
+
+  Input shape:
+    Rank-2 tensor with shape: (batch_size, num_input_dims)
+
+  Output shape:
+    Rank-2 tensor with shape: (batch_size, 1)
+
+  Attributes:
+    - All `__init__` arguments.
+    kernel: layer's kernel.
+    bias: layer's bias. Only available if `use_bias == True`.
+
+  Example:
+
+  ```python
+  layer = tfl.linear_layer.Linear(
+      num_input_dims=8,
+      # Monotonicity constraints can be defined per dimension or for all dims.
+      monotonicities='increasing',
+      use_bias=True,
+      # You can force the L1 norm to be 1. Since this is a monotonic layer,
+      # the coefficients will sum to 1, making this a "weighted average".
+      normalization_order=1)
+  ```
+  """
+  # pyformat: enable
+
+  def __init__(self,
+               num_input_dims,
+               monotonicities=None,
+               monotonic_dominances=None,
+               use_bias=True,
+               normalization_order=None,
+               kernel_initializer="random_uniform",
+               bias_initializer="random_uniform",
+               kernel_regularizer=None,
+               bias_regularizer=None,
+               **kwargs):
+    """Initializes an instance of `Linear`.
+
+    Args:
+      num_input_dims: Number of input dimensions.
+      monotonicities: None or list or tuple of length 'num_input_dims' of
+        {'decreasing', 'none', 'increasing', -1, 0, 1} which specifies if the
+        model output should be monotonic in the corresponding feature, using
+        'increasing' or 1 to indicate increasing monotonicity, 'decreasing' or
+        -1 to indicate decreasing monotonicity and 'none' or 0 to indicate no
+        monotonicity constraints. For decreasing monotonicity the
+        corresponding weight is constrained to be non-positive, for increasing
+        non-negative. A single value can be specified instead of a list or
+        tuple to apply the same monotonicity constraint across all dimensions.
+      monotonic_dominances: None or list of two-element tuples. First element
+        is the index of the dominant feature. Second element is the index of
+        the weak feature.
+      use_bias: Whether the linear function has a bias term.
+      normalization_order: If specified, learned weights will be adjusted to
+        have norm 1. Norm will be computed by: `tf.norm(tensor,
+        ord=normalization_order)`.
+      kernel_initializer: Any Keras initializer to be applied to kernel.
+      bias_initializer: Any Keras initializer to be applied to bias. Only
+        valid if `use_bias == True`.
+      kernel_regularizer: None or single element or list of any Keras
+        regularizer objects.
+      bias_regularizer: None or single element or list of any Keras
+        regularizer objects.
+      **kwargs: Other args passed to `tf.keras.layers.Layer` initializer.
+
+    Raises:
+      ValueError: If monotonicity is specified incorrectly.
+    """
+    super(Linear, self).__init__(**kwargs)
+
+    self.num_input_dims = num_input_dims
+
+    if isinstance(monotonicities, list) or isinstance(monotonicities, tuple):
+      self.monotonicities = list(monotonicities)
+    elif monotonicities is not None:
+      self.monotonicities = [monotonicities] * self.num_input_dims
+    else:
+      self.monotonicities = [0] * self.num_input_dims
+    self.monotonic_dominances = monotonic_dominances
+    # Verify hyperparameters after converting monotonicities to a list because
+    # internally everything expects monotonicities to be a list or tuple
+    # rather than a single element.
+    linear_lib.verify_hyperparameters(
+        num_input_dims=self.num_input_dims, monotonicities=self.monotonicities)
+
+    self.use_bias = use_bias
+    self.normalization_order = normalization_order
+    self.kernel_initializer = keras.initializers.get(kernel_initializer)
+    if use_bias:
+      self.bias_initializer = keras.initializers.get(bias_initializer)
+
+    self.kernel_regularizer = []
+    if kernel_regularizer:
+      if callable(kernel_regularizer):
+        kernel_regularizer = [kernel_regularizer]
+      for reg in kernel_regularizer:
+        self.kernel_regularizer.append(keras.regularizers.get(reg))
+    self.bias_regularizer = []
+    if bias_regularizer:
+      if callable(bias_regularizer):
+        bias_regularizer = [bias_regularizer]
+      for reg in bias_regularizer:
+        self.bias_regularizer.append(keras.regularizers.get(reg))
+
+    self.input_spec = keras.layers.InputSpec(
+        dtype=self.dtype, shape=(None, num_input_dims))
+
+  def build(self, input_shape):
+    """Standard Keras build() method.
+
+    Args:
+      input_shape: Must be: (batch_size, num_input_dims)
+
+    Raises:
+      ValueError: If shape is not (batch_size, num_input_dims).
+    """
+    if len(input_shape) != 2 or input_shape[1] != self.num_input_dims:
+      raise ValueError("'input_shape' must be of rank two and number of "
+                       "elements of second dimension must be equal to "
+                       "'num_input_dims'. 'input_shape': " + str(input_shape) +
+                       ", 'num_input_dims': " + str(self.num_input_dims))
+
+    if (any(self.monotonicities) or self.monotonic_dominances or
+        self.normalization_order):
+      constraints = LinearConstraints(
+          monotonicities=self.monotonicities,
+          monotonic_dominances=self.monotonic_dominances,
+          normalization_order=self.normalization_order)
+    else:
+      constraints = None
+
+    if not self.kernel_regularizer:
+      kernel_reg = None
+    elif len(self.kernel_regularizer) == 1:
+      kernel_reg = self.kernel_regularizer[0]
+    else:
+      # Keras interface assumes only one regularizer, so sum all
+      # regularization losses that we have.
+      kernel_reg = lambda x: tf.add_n([r(x) for r in self.kernel_regularizer])
+
+    self.kernel = self.add_weight(
+        LINEAR_LAYER_KERNEL_NAME,
+        # 1-column matrix rather than vector for matrix multiplication.
+        shape=[self.num_input_dims, 1],
+        initializer=self.kernel_initializer,
+        regularizer=kernel_reg,
+        constraint=constraints,
+        dtype=self.dtype)
+
+    if self.use_bias:
+      if not self.bias_regularizer:
+        bias_reg = None
+      elif len(self.bias_regularizer) == 1:
+        bias_reg = self.bias_regularizer[0]
+      else:
+        bias_reg = lambda x: tf.add_n([r(x) for r in self.bias_regularizer])
+      self.bias = self.add_weight(
+          LINEAR_LAYER_BIAS_NAME,
+          shape=[],
+          initializer=self.bias_initializer,
+          regularizer=bias_reg,
+          constraint=None,
+          dtype=self.dtype)
+
+    super(Linear, self).build(input_shape)
+
+  def call(self, inputs):
+    """Standard Keras call() method."""
+    result = tf.matmul(inputs, self.kernel)
+    if self.use_bias:
+      result += self.bias
+    return result
+
+  def compute_output_shape(self, input_shape):
+    """Standard Keras compute_output_shape() method."""
+    del input_shape
+    return [None, 1]
+
+  def get_config(self):
+    """Standard Keras get_config() method."""
+    config = {
+        "num_input_dims": self.num_input_dims,
+        "monotonicities": self.monotonicities,
+        "use_bias": self.use_bias,
+        "normalization_order": self.normalization_order,
+        "monotonic_dominances": self.monotonic_dominances,
+        "kernel_initializer":
+            keras.initializers.serialize(self.kernel_initializer),
+        "kernel_regularizer": [
+            keras.regularizers.serialize(r) for r in self.kernel_regularizer
+        ],
+    }  # pyformat: disable
+    if self.use_bias:
+      config["bias_initializer"] = keras.initializers.serialize(
+          self.bias_initializer)
+      config["bias_regularizer"] = [
+          keras.regularizers.serialize(r) for r in self.bias_regularizer
+      ]
+
+    config.update(super(Linear, self).get_config())
+    return config
+
+  # Default eps is bigger than for other layers because normalization is
+  # prone to numerical errors.
+  def assert_constraints(self, eps=1e-4):
+    """Asserts that weights satisfy all constraints.
+
+    In graph mode builds and returns a list of assertion ops.
+    In eager mode directly executes assertions.
+
+    Args:
+      eps: Allowed constraints violation.
+
+    Returns:
+      List of assertion ops in graph mode or immediately asserts in eager
+      mode.
+    """
+    return linear_lib.assert_constraints(
+        weights=self.kernel,
+        monotonicities=linear_lib.canonicalize_monotonicities(
+            self.monotonicities),
+        monotonic_dominances=self.monotonic_dominances,
+        normalization_order=self.normalization_order,
+        eps=eps)
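[Editor's note, not part of the patch: a minimal usage sketch of the `Linear` layer defined above, under the assumption that the module is importable as below; data and hyperparameters are illustrative only.]

```python
import numpy as np
import tensorflow as tf
from tensorflow_lattice.python import linear_layer as linl

# A monotonic "weighted average": increasing in all dims, L1 norm fixed to 1.
model = tf.keras.models.Sequential([
    linl.Linear(
        num_input_dims=3,
        monotonicities="increasing",  # single value applies to all dims
        normalization_order=1,
        use_bias=False,
        input_shape=(3,)),
])
model.compile(loss="mse", optimizer=tf.keras.optimizers.Adagrad(0.1))

x = np.random.uniform(size=(100, 3)).astype(np.float32)
y = (0.2 * x[:, 0] + 0.3 * x[:, 1] + 0.5 * x[:, 2]).reshape(-1, 1)
model.fit(x, y, epochs=10, verbose=0)
```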
+
+class LinearConstraints(keras.constraints.Constraint):
+  # pyformat: disable
+  """Applies monotonicity constraints and normalization to TFL Linear layer.
+
+  Monotonicity is specified per input dimension, in which case the learned
+  weight for each constrained dimension is guaranteed to be non-negative for
+  increasing or non-positive for decreasing monotonicity.
+
+  Weights can be constrained to have norm 1.
+
+  Attributes:
+    - All `__init__` arguments.
+  """
+  # pyformat: enable
+
+  def __init__(self, monotonicities, monotonic_dominances=None,
+               normalization_order=None):
+    """Initializes an instance of `LinearConstraints`.
+
+    Args:
+      monotonicities: Same meaning as corresponding parameter of `Linear`.
+      monotonic_dominances: Same meaning as corresponding parameter of
+        `Linear`.
+      normalization_order: Same meaning as corresponding parameter of
+        `Linear`.
+    """
+    linear_lib.verify_hyperparameters(
+        monotonicities=monotonicities,
+        monotonic_dominances=monotonic_dominances)
+    self.monotonicities = monotonicities
+    self.monotonic_dominances = monotonic_dominances
+    self.normalization_order = normalization_order
+
+  def __call__(self, w):
+    """Applies constraints to w.
+
+    Args:
+      w: Tensor which represents weights of TFL linear layer. Must have shape:
+        `(len(self.monotonicities), 1)`.
+
+    Raises:
+      ValueError: if shape of `w` is not `(len(self.monotonicities), 1)`.
+
+    Returns:
+      Tensor `w` with monotonicity constraints and normalization applied to it.
+    """
+    return linear_lib.project(
+        weights=w,
+        monotonicities=linear_lib.canonicalize_monotonicities(
+            self.monotonicities),
+        monotonic_dominances=self.monotonic_dominances,
+        normalization_order=self.normalization_order)
+
+  def get_config(self):
+    """Standard Keras get_config() method."""
+    return {
+        "monotonicities": self.monotonicities,
+        "monotonic_dominances": self.monotonic_dominances,
+        "normalization_order": self.normalization_order
+    }  # pyformat: disable
diff --git a/tensorflow_lattice/python/linear_lib.py b/tensorflow_lattice/python/linear_lib.py
new file mode 100644
index 0000000..b20960d
--- /dev/null
+++ b/tensorflow_lattice/python/linear_lib.py
@@ -0,0 +1,253 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Implementation of algorithms required for Linear layer."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from . import utils
+import six
+import tensorflow as tf
+
+_NORMALIZATION_EPS = 1e-8
+
+
+def project(weights, monotonicities, monotonic_dominances=None,
+            normalization_order=None):
+  """Applies constraints to weights.
+
+  Args:
+    weights: Tensor which represents weights of TFL linear layer. Must have
+      shape [len(monotonicities), 1].
+    monotonicities: List or tuple of same length as number of elements in
+      'weights' of {-1, 0, 1} which represent monotonicity constraints per
+      dimension. -1 stands for decreasing, 0 for no constraints, 1 for
+      increasing.
+    monotonic_dominances: List of two-element tuples. First element is the
+      index of the dominant feature. Second element is the index of the weak
+      feature.
+    normalization_order: If specified, weights will be adjusted to have norm
+      1. Norm will be computed by: `tf.norm(tensor,
+      ord=normalization_order)`.
+
+  Raises:
+    ValueError: If shape of weights is not `(len(monotonicities), 1)`.
+
+  Returns:
+    'weights' with monotonicity constraints and normalization applied to it.
+  """
+  verify_hyperparameters(weights_shape=weights.shape,
+                         monotonicities=monotonicities,
+                         monotonic_dominances=monotonic_dominances)
+  if any(monotonicities):
+    if 1 in monotonicities:
+      inverted_increasing_mask = tf.constant(
+          value=[0.0 if m == 1 else 1.0 for m in monotonicities],
+          dtype=weights.dtype,
+          shape=weights.shape)
+      # Multiplying by this mask keeps non-monotonic dims the same and sets
+      # monotonic dims to 0.0. Taking the maximum with this product therefore
+      # takes the maximum of each monotonic dim with 0.0.
+      weights = tf.maximum(weights, weights * inverted_increasing_mask)
+
+    if -1 in monotonicities:
+      inverted_decreasing_mask = tf.constant(
+          value=[0.0 if m == -1 else 1.0 for m in monotonicities],
+          dtype=weights.dtype,
+          shape=weights.shape)
+      weights = tf.minimum(weights, weights * inverted_decreasing_mask)
+
+  if monotonic_dominances:
+    monotonic_dominances = [(j, i) for i, j in monotonic_dominances]
+    weights = utils.approximately_project_categorical_partial_monotonicities(
+        weights, monotonic_dominances)
+
+  if normalization_order:
+    norm = tf.norm(weights, ord=normalization_order)
+    weights = tf.cond(norm < _NORMALIZATION_EPS,
+                      true_fn=lambda: weights,
+                      false_fn=lambda: weights / norm)
+
+  return weights
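[Editor's note, not part of the patch: a concrete, illustrative example of the projection above. With `monotonicities=[1, 1]` the negative weight is clipped to 0.0, and with `normalization_order=1` the result is rescaled to unit L1 norm.]

```python
import tensorflow as tf
from tensorflow_lattice.python import linear_lib

weights = tf.constant([[-0.5], [2.0]])
projected = linear_lib.project(
    weights, monotonicities=[1, 1], normalization_order=1)
# [-0.5, 2.0] -> clip to [0.0, 2.0] -> L1-normalize to [0.0, 1.0]
print(projected)  # approximately [[0.0], [1.0]]
```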
+def assert_constraints(weights, monotonicities, monotonic_dominances,
+                       normalization_order, eps=1e-4):
+  """Asserts that weights satisfy constraints.
+
+  Args:
+    weights: Weights of Linear layer.
+    monotonicities: List or tuple of same length as number of elements in
+      'weights' of {-1, 0, 1} which represent monotonicity constraints per
+      dimension. -1 stands for decreasing, 0 for no constraints, 1 for
+      increasing.
+    monotonic_dominances: List of two-element tuples. First element is the
+      index of the dominant feature. Second element is the index of the weak
+      feature.
+    normalization_order: Whether weights have to have norm 1. Norm will be
+      computed by: `tf.norm(tensor, ord=normalization_order)`.
+    eps: Allowed constraints violation.
+
+  Returns:
+    List of assertion ops in graph mode; in eager mode assertions are
+    executed directly.
+  """
+  asserts = []
+  if any(monotonicities):
+    # Create a constant specifying the shape explicitly, because otherwise
+    # the trailing dimension of size 1 in the weights shape would cause
+    # harmful broadcasting.
+    monotonicities_constant = tf.constant(monotonicities,
+                                          shape=weights.shape,
+                                          dtype=weights.dtype)
+    diff = tf.reduce_min(weights * monotonicities_constant)
+    asserts.append(
+        tf.Assert(diff >= -eps,
+                  data=["Monotonicity violation",
+                        "Monotonicities:", monotonicities,
+                        "Min monotonicity diff:", diff,
+                        "Epsilon:", eps,
+                        "Weights:", weights],
+                  summarize=weights.shape[0]))
+
+  for dominant_dim, weak_dim in monotonic_dominances or []:
+    diff = tf.reduce_min(weights[dominant_dim] - weights[weak_dim])
+    asserts.append(
+        tf.Assert(diff >= -eps,
+                  data=["Monotonic dominance violation",
+                        "Dominant dim:", dominant_dim,
+                        "Weak dim:", weak_dim,
+                        "Epsilon:", eps,
+                        "Weights:", weights],
+                  summarize=weights.shape[0]))
+
+  if normalization_order:
+    norm = tf.norm(weights, ord=normalization_order)
+    asserts.append(
+        # Norm can be either 0.0 or 1.0, because if all weights are close to
+        # 0.0 we can't scale them to get norm 1.0.
+        tf.Assert(tf.logical_or(tf.abs(norm - 1.0) < eps,
+                                tf.abs(norm) < _NORMALIZATION_EPS),
+                  data=["Normalization order violation",
+                        "Norm:", norm,
+                        "Epsilon:", eps,
+                        "Weights:", weights],
+                  summarize=weights.shape[0]))
+  return asserts
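[Editor's note, not part of the patch: in TF1 graph mode the returned assertion ops must be run explicitly; in eager mode `tf.Assert` fires on creation. The sketch below mirrors the pattern used in linear_test.py later in this patch; `layer` is an assumed built `Linear` instance.]

```python
import tensorflow as tf

assertion_ops = layer.assert_constraints(eps=1e-4)
if not tf.executing_eagerly() and assertion_ops:
  tf.compat.v1.keras.backend.get_session().run(assertion_ops)
```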
+def verify_hyperparameters(num_input_dims=None,
+                           monotonicities=None,
+                           monotonic_dominances=None,
+                           weights_shape=None):
+  """Verifies that all given hyperparameters are consistent.
+
+  This function does not inspect weights themselves. Only their shape. Use
+  `assert_constraints()` to assert actual weights against constraints.
+
+  Unlike the linear layer itself, this function requires monotonicities to be
+  specified via a list or tuple rather than a single element, because that is
+  how monotonicities are stored internally.
+
+  See `tfl.linear_layer.Linear` Layer class level comment for detailed
+  description of arguments.
+
+  Args:
+    num_input_dims: None or number of input dimensions.
+    monotonicities: List or tuple of same length as number of elements in
+      `weights` of {-1, 0, 1} which represent monotonicity constraints per
+      dimension. -1 stands for decreasing, 0 for no constraints, 1 for
+      increasing.
+    monotonic_dominances: List of two-element tuples. First element is the
+      index of the dominant feature. Second element is the index of the weak
+      feature.
+    weights_shape: None or shape of tensor which represents weights of Linear
+      layer.
+
+  Raises:
+    ValueError: If something is inconsistent.
+  """
+  # It also raises errors if monotonicities are specified incorrectly.
+  monotonicities = canonicalize_monotonicities(monotonicities)
+
+  if monotonicities is not None and num_input_dims is not None:
+    if len(monotonicities) != num_input_dims:
+      raise ValueError("Number of elements in 'monotonicities' must be equal "
+                       "to num_input_dims. monotonicities: %s, "
+                       "len(monotonicities): %d, num_input_dims: %d"
+                       % (monotonicities, len(monotonicities), num_input_dims))
+
+  if weights_shape is not None and monotonicities is not None:
+    if (len(weights_shape) != 2 or weights_shape[0] != len(monotonicities)
+        or weights_shape[1] != 1):
+      raise ValueError("Number of elements in 'monotonicities' does not "
+                       "correspond to number of weights. Weights shape: %s, "
+                       "monotonicities: %s" % (weights_shape, monotonicities))
+
+  if monotonic_dominances is not None:
+    assert monotonicities is not None
+    num_input_dims = len(monotonicities)
+    dim_pairs = set()
+    for constraint in monotonic_dominances:
+      if len(constraint) != 2:
+        raise ValueError("Monotonic dominance constraints must consist of 2 "
+                         "elements. Seeing constraint tuple %s" % (constraint,))
+      dominant_dim, weak_dim = constraint
+      if (dominant_dim >= num_input_dims or weak_dim >= num_input_dims or
+          dominant_dim < 0 or weak_dim < 0):
+        raise ValueError("Dimensions constrained by monotonic dominance "
+                         "constraints are not within the input dimensions. "
+                         "'dims': %s, %s, num_dims: %s" %
+                         (dominant_dim, weak_dim, num_input_dims))
+      if not isinstance(dominant_dim, int) or not isinstance(weak_dim, int):
+        raise ValueError("Monotonic dominance constraint dimensions must be "
+                         "integers. Seeing dominant_dim %s and weak_dim %s" %
+                         (dominant_dim, weak_dim))
+      for dim in [dominant_dim, weak_dim]:
+        if monotonicities[dim] != 1:
+          raise ValueError("Monotonic dominance constraint's features must be "
+                           "monotonic. Dimension %d is not monotonic." % (dim))
+      if (weak_dim, dominant_dim) in dim_pairs:
+        raise ValueError("Cannot have two conflicting dominance constraints "
+                         "on the same pair of features. Features: %d, %d" %
+                         (dominant_dim, weak_dim))
+      dim_pairs.add((dominant_dim, weak_dim))
+
+
+def canonicalize_monotonicities(monotonicities):
+  """Converts string constants representing monotonicities into integers.
+
+  Args:
+    monotonicities: monotonicities hyperparameter of `Lattice` layer.
+
+  Raises:
+    ValueError: If one of monotonicities is invalid.
+
+  Returns:
+    monotonicities represented as -1, 0 or 1.
+ """ + if monotonicities: + canonicalized = [] + for item in monotonicities: + if item in [-1, 0, 1]: + canonicalized.append(item) + elif isinstance(item, six.string_types) and item.lower() == "decreasing": + canonicalized.append(-1) + elif isinstance(item, six.string_types) and item.lower() == "none": + canonicalized.append(0) + elif isinstance(item, six.string_types) and item.lower() == "increasing": + canonicalized.append(1) + else: + raise ValueError("'monotonicities' elements must be from: [-1, 0, 1, " + "'decreasing', 'none', 'increasing']. " + "Given: %s" % monotonicities) + return canonicalized + return None diff --git a/tensorflow_lattice/python/linear_test.py b/tensorflow_lattice/python/linear_test.py new file mode 100644 index 0000000..374aa12 --- /dev/null +++ b/tensorflow_lattice/python/linear_test.py @@ -0,0 +1,543 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for Tensorflow Lattice linear layer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +from absl import logging +from absl.testing import parameterized +import numpy as np +import tensorflow as tf +from tensorflow import keras +from tensorflow_lattice.python import linear_layer as linl +from tensorflow_lattice.python import linear_lib +from tensorflow_lattice.python import test_utils + +_DISABLE_ALL = False +_LOSS_EPS = 0.0001 +_SMALL_EPS = 1e-6 + + +class LinearTest(parameterized.TestCase, tf.test.TestCase): + """Tests for TFL linear layer.""" + + def _ResetAllBackends(self): + keras.backend.clear_session() + tf.compat.v1.reset_default_graph() + + def _ScaterXUniformly(self, num_points, num_dims, input_min, input_max): + """Generates num_points num_dims-dimensional points within given range.""" + np.random.seed(41) + x = [] + for _ in range(num_points): + point = [ + np.random.random() * (input_max - input_min) + input_min + for _ in range(num_dims) + ] + x.append(np.asarray(point)) + if num_dims == 1: + x.sort() + return x + + def _TwoDMeshGrid(self, num_points, num_dims, input_min, input_max): + """Mesh grid for visualisation of 3-d surfaces via pyplot.""" + if num_dims != 2: + raise ValueError("2-d mesh grid can be created only for 2-d data. Given: " + "%d." % num_dims) + return test_utils.two_dim_mesh_grid( + num_points=num_points, + x_min=input_min, + y_min=input_min, + x_max=input_max, + y_max=input_max) + + def _GenLinearFunction(self, weights, bias=0.0, noise=None): + """Returns python function which computes linear function.""" + + def Linear(x): + if len(x) != len(weights): + raise ValueError("X and weights have different number of elements. 
" + "X: " + str(x) + "; weights: " + str(weights)) + result = bias + if noise: + result += noise(x) + for (i, y) in enumerate(x): + result += weights[i] * y + return result + + return Linear + + def _SinPlusXPlusD(self, x): + return math.sin(x[0]) + x[0] / 3.0 + 3.0 + + def _SetDefaults(self, config): + config.setdefault("monotonicities", None) + config.setdefault("monotonic_dominances", None) + config.setdefault("use_bias", False) + config.setdefault("normalization_order", None) + config.setdefault("kernel_init_constant", 0.0) + config.setdefault("bias_init_constant", 0.0) + config.setdefault("kernel_regularizer", None) + config.setdefault("bias_regularizer", None) + config.setdefault("allowed_constraints_violation", 1e-6) + return config + + def _GetTrainingInputsAndLabels(self, config): + """Generates training inputs and labels. + + Args: + config: Dict with config for this unit test. + + Returns: + Tuple `(training_inputs, training_labels, raw_training_inputs)` where + `training_inputs` and `training_labels` are data for training and + `raw_training_inputs` are representation of `training_inputs` for + visualisation. + """ + raw_training_inputs = config["x_generator"]( + num_points=config["num_training_records"], + num_dims=config["num_input_dims"], + input_min=config["input_min"], + input_max=config["input_max"]) + + if isinstance(raw_training_inputs, tuple): + # This means that raw inputs are 2-d mesh grid. Convert them into list of + # 2-d points. + training_inputs = list(np.dstack(raw_training_inputs).reshape((-1, 2))) + else: + training_inputs = raw_training_inputs + + training_labels = [config["y_function"](x) for x in training_inputs] + return training_inputs, training_labels, raw_training_inputs + + def _TrainModel(self, config, plot_path=None): + """Trains model and returns loss. + + Args: + config: Layer config internal for this test which specifies params of + linear layer to train. + plot_path: if specified - png file name to save visualisation. See + test_utils.run_training_loop() for more details. + + Returns: + Training loss. 
+ """ + logging.info("Testing config:") + logging.info(config) + config = self._SetDefaults(config) + + self._ResetAllBackends() + + training_inputs, training_labels, raw_training_inputs = ( + self._GetTrainingInputsAndLabels(config)) + + linear_layer = linl.Linear( + input_shape=[config["num_input_dims"]], + num_input_dims=config["num_input_dims"], + monotonicities=config["monotonicities"], + monotonic_dominances=config["monotonic_dominances"], + use_bias=config["use_bias"], + normalization_order=config["normalization_order"], + kernel_initializer=keras.initializers.Constant( + config["kernel_init_constant"]), + bias_initializer=keras.initializers.Constant( + config["bias_init_constant"]), + kernel_regularizer=config["kernel_regularizer"], + bias_regularizer=config["bias_regularizer"], + dtype=tf.float32) + model = keras.models.Sequential() + model.add(linear_layer) + optimizer = config["optimizer"](learning_rate=config["learning_rate"]) + model.compile(loss=keras.losses.mean_squared_error, optimizer=optimizer) + + training_data = (training_inputs, training_labels, raw_training_inputs) + + loss = test_utils.run_training_loop( + config=config, + training_data=training_data, + keras_model=model, + plot_path=plot_path) + + assetion_ops = linear_layer.assert_constraints( + eps=config["allowed_constraints_violation"]) + if not tf.executing_eagerly() and assetion_ops: + tf.compat.v1.keras.backend.get_session().run(assetion_ops) + return loss + + def _NegateAndTrain(self, config): + """Changes monotonicity directions to opposite and trains model.""" + negated_config = dict(config) + negated_config["y_function"] = lambda x: -config["y_function"](x) + negated_config["bias_init_constant"] = -config["bias_init_constant"] + negated_config["kernel_init_constant"] = -config["kernel_init_constant"] + + if isinstance(config["monotonicities"], list): + negated_config["monotonicities"] = [ + -monotonicity for monotonicity in + linear_lib.canonicalize_monotonicities(config["monotonicities"]) + ] + else: + negated_config["monotonicities"] = -config["monotonicities"] + + negated_loss = self._TrainModel(negated_config) + return negated_loss + + @parameterized.parameters((False, 1.623906), (True, 0.456815)) + # Expected losses are computed by running this test. Correctness is verified + # manually by looking at visualisation of learned function vs ground truth. + def testOneDUnconstrained(self, use_bias, expected_loss): + if _DISABLE_ALL: + return + config = { + "num_input_dims": 1, + "use_bias": use_bias, + "num_training_records": 128, + "num_training_epoch": 400, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.5, + "x_generator": self._ScaterXUniformly, + "input_min": 5.0, + "input_max": 25.0, + "y_function": self._SinPlusXPlusD, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=_LOSS_EPS) + + @parameterized.parameters((False, 0.881774), (True, 0.441771)) + # Expected losses are computed by running this test. Correctness is verified + # manually by looking at visualisation of learned function vs ground truth. 
+ def testTwoDUnconstrained(self, use_bias, expected_loss): + if _DISABLE_ALL: + return + config = { + "num_input_dims": 2, + "use_bias": use_bias, + "num_training_records": 64, + "num_training_epoch": 160, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.5, + "x_generator": self._TwoDMeshGrid, + "input_min": 0.0, + "input_max": 4.0, + "y_function": self._GenLinearFunction( + weights=[-1.0, 2.0], + bias=-2.0, + noise=lambda x: math.sin(sum(x)) / 1.0), + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=_LOSS_EPS) + + def testInitializers(self): + # Test initializers by trying to fit linear function using 0 iterations. + if _DISABLE_ALL: + return + config = { + "num_input_dims": 2, + "use_bias": True, + "num_training_records": 64, + "num_training_epoch": 0, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.5, + "x_generator": self._TwoDMeshGrid, + "input_min": 0.0, + "input_max": 4.0, + "kernel_init_constant": 3.0, + "bias_init_constant": -2.0, + "y_function": self._GenLinearFunction(weights=[3.0, 3.0], bias=-2.0) + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.0, delta=_LOSS_EPS) + + def testAssertConstraints(self): + if _DISABLE_ALL: + return + config = { + "num_input_dims": 4, + "use_bias": True, + "num_training_records": 64, + "num_training_epoch": 0, + "normalization_order": 1, + "monotonicities": [1] * 4, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.5, + "x_generator": self._ScaterXUniformly, + "input_min": 0.0, + "input_max": 4.0, + "kernel_init_constant": 0.25, + "bias_init_constant": -2.0, + "y_function": self._GenLinearFunction(weights=[0.25] * 4, bias=-2.0) + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.0, delta=_LOSS_EPS) + + with self.assertRaises(tf.errors.InvalidArgumentError): + config["normalization_order"] = 2 + self._TrainModel(config) + + with self.assertRaises(tf.errors.InvalidArgumentError): + # Setting valid normalization order back and instead violating + # monotonicity. + config["normalization_order"] = 1 + config["monotonicities"] = [1, 1, -1, 0] + self._TrainModel(config) + + @parameterized.parameters((False, 1.623906), (True, 0.456815)) + # Expected losses are computed by running this test. Correctness is verified + # manually by looking at visualisation of learned function vs ground truth. + def testOneDMonotonicities_MonotonicInput(self, use_bias, expected_loss): + if _DISABLE_ALL: + return + config = { + "num_input_dims": 1, + "monotonicities": [1], + "use_bias": use_bias, + "num_training_records": 128, + "num_training_epoch": 400, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.5, + "x_generator": self._ScaterXUniformly, + "input_min": 5.0, + "input_max": 25.0, + "y_function": self._SinPlusXPlusD, + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=_LOSS_EPS) + self.assertAlmostEqual(loss, self._NegateAndTrain(config), delta=_SMALL_EPS) + + @parameterized.parameters((False, 62.670425), (True, 3.326165)) + # Expected losses are computed by running this test. Correctness is verified + # manually by looking at visualisation of learned function vs ground truth. 
+ def testOneDMonotonicities_AntiMonotonicInput(self, use_bias, expected_loss): + if _DISABLE_ALL: + return + config = { + "num_input_dims": 1, + "monotonicities": ["increasing"], + "use_bias": use_bias, + "num_training_records": 128, + "num_training_epoch": 400, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.5, + "x_generator": self._ScaterXUniformly, + "input_min": 5.0, + "input_max": 25.0, + "y_function": lambda x: -self._SinPlusXPlusD(x), + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=_LOSS_EPS) + self.assertAlmostEqual(loss, self._NegateAndTrain(config), delta=_SMALL_EPS) + + @parameterized.parameters((1, 2.0), (1, -2.0), (2, 2.0), (2, -2.0)) + # Expected loss is computed by running this test. Correctness is verified + # manually by looking at visualisation of learned function vs ground truth. + def testOneDNormalizationOrder(self, norm_order, weight): + if _DISABLE_ALL: + return + config = { + "num_input_dims": 1, + "monotonicities": [0], + "normalization_order": norm_order, + "use_bias": True, + "num_training_records": 128, + "num_training_epoch": 20, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.5, + "x_generator": self._ScaterXUniformly, + "input_min": 0.0, + "input_max": 5.0, + "y_function": self._GenLinearFunction(weights=[weight], bias=0.0) + } # pyformat: disable + loss = self._TrainModel(config) + # For 1-d case normalization order does not change anything. + self.assertAlmostEqual(loss, 1.727717, delta=_LOSS_EPS) + + def testOneDNormalizationOrderZeroWeights(self): + if _DISABLE_ALL: + return + # Normalization is impossible when all weights are 0.0 so weights should not + # be affected by it. + config = { + "num_input_dims": 1, + "monotonicities": ["none"], + "normalization_order": 1, + "use_bias": True, + "num_training_records": 128, + "num_training_epoch": 20, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.5, + "x_generator": self._ScaterXUniformly, + "input_min": 0.0, + "input_max": 5.0, + "y_function": self._GenLinearFunction(weights=[0.0], bias=0.0) + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.0, delta=_LOSS_EPS) + + @parameterized.parameters( + (0.441771, 0), + (0.441771, ["none", "none"]), + (2.61706, 1), + (2.61706, ["increasing", "increasing"]), + (2.61706, ["increasing", "none"]), + (0.441771, ["none", "increasing"]) + ) + # Expected losses are computed by running this test. Correctness is verified + # manually by looking at visualisation of learned function vs ground truth. + def testTwoDMonotonicity(self, expected_loss, monotonicities): + if _DISABLE_ALL: + return + config = { + "num_input_dims": 2, + "monotonicities": monotonicities, + "use_bias": True, + "num_training_records": 64, + "num_training_epoch": 160, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.5, + "x_generator": self._TwoDMeshGrid, + "input_min": 0.0, + "input_max": 4.0, + "y_function": self._GenLinearFunction( + weights=[-1.0, 2.0], + bias=-2.0, + noise=lambda x: math.sin(sum(x)) / 1.0) + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=_LOSS_EPS) + self.assertAlmostEqual(loss, self._NegateAndTrain(config), delta=_SMALL_EPS) + + @parameterized.parameters( + (1, [0.2, 0.3], 0, 0.250532), # Testing sum of weights < 1.0. + (1, [0.2, 0.3], 1, 0.250532), # Monotonicity does not matter here. 
+ (2, [0.2, 0.3], 0, 0.753999), + (1, [1.0, 2.0], 0, 5.688659), # Testing sum of weights > 1.0. + (1, [-1.0, 2.0], 0, 4.043515), + # With negative weights monotonicity matters. + (1, [-1.0, 2.0], 1, 3.433537)) + # Expected losses are computed by running this test. Correctness is verified + # manually by looking at visualisation of learned function vs ground truth. + def testTwoDNormalizationOrder(self, norm_order, weights, monotonicities, + expected_loss): + if _DISABLE_ALL: + return + config = { + "num_input_dims": 2, + "normalization_order": norm_order, + "monotonicities": monotonicities, + # If normalization order is set then layer will always converges to + # extremes if there is no bias or other layers. That's why we always + # use bias for normalization order tests. + "use_bias": True, + "num_training_records": 64, + "num_training_epoch": 160, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 0.15, + "x_generator": self._TwoDMeshGrid, + "input_min": 0.0, + "input_max": 4.0, + "y_function": self._GenLinearFunction( + weights=weights, noise=lambda x: math.sin(sum(x)) / 10.0) + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=_LOSS_EPS) + + @parameterized.parameters( + ([0.5, 0.6, 0.06, 0.07, 0.08], [1, 1, 1, 1, 1], 0.0408642), + ([0.5, -0.6, 0.06, -0.07, 0.08], [1, 1, 1, 1, 1], 0.561592), + ([0.5, -0.6, 0.06, -0.07, 0.08], [0, 0, 1, 1, 1], 0.047663)) + # Expected losses are computed by running this test. Correctness is verified + # manually by looking at visualisation of learned function vs ground truth. + def testFiveDAllConstraints(self, weights, monotonicities, expected_loss): + if _DISABLE_ALL: + return + config = { + "num_input_dims": 5, + "normalization_order": 1, + "monotonicities": monotonicities, + "use_bias": True, + "num_training_records": 640, + "num_training_epoch": 160, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.5, + "x_generator": self._ScaterXUniformly, + "input_min": 0.0, + "kernel_init_constant": 0.7, + "input_max": 4.0, + "y_function": self._GenLinearFunction( + weights=weights, noise=lambda x: math.sin(sum(x)) / 30.0) + } # pyformat: disable + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=_LOSS_EPS) + + @parameterized.parameters((0.85766, [(0, 1)]), + (1e-13, [(1, 0)])) + # Expected losses are computed by running this test. Correctness is verified + # manually by looking at visualisation of learned function vs ground truth. 
+  def testTwoDMonotonicDominance(self, expected_loss, dominances):
+    if _DISABLE_ALL:
+      return
+    config = {
+        "num_input_dims": 2,
+        "monotonicities": ["increasing", "increasing"],
+        "monotonic_dominances": dominances,
+        "num_training_records": 64,
+        "num_training_epoch": 160,
+        "optimizer": tf.keras.optimizers.Adagrad,
+        "learning_rate": 1.5,
+        "x_generator": self._TwoDMeshGrid,
+        "input_min": 0.0,
+        "input_max": 4.0,
+        "y_function": self._GenLinearFunction(weights=[1.0, 2.0])
+    }  # pyformat: disable
+    loss = self._TrainModel(config)
+    self.assertAlmostEqual(loss, expected_loss, delta=_LOSS_EPS)
+
+  @parameterized.parameters(
+      # Standard Keras regularizer:
+      (keras.regularizers.l1_l2(l1=0.01, l2=0.001),),
+      # Tuple of regularizers:
+      ((keras.regularizers.l1_l2(l1=0.01, l2=0.0),
+        keras.regularizers.l1_l2(l1=0.0, l2=0.001)),),
+  )
+  def testRegularizers(self, regularizer):
+    if _DISABLE_ALL:
+      return
+    config = {
+        "num_input_dims": 2,
+        "use_bias": True,
+        "num_training_records": 64,
+        "num_training_epoch": 0,
+        "optimizer": tf.keras.optimizers.Adagrad,
+        "learning_rate": 1.5,
+        "x_generator": self._TwoDMeshGrid,
+        "input_min": 0.0,
+        "input_max": 4.0,
+        "kernel_init_constant": 2.0,
+        "bias_init_constant": 3.0,
+        "y_function": self._GenLinearFunction(weights=[2.0, 2.0], bias=3.0),
+        "kernel_regularizer": regularizer,
+        "bias_regularizer": regularizer,
+    }  # pyformat: disable
+    loss = self._TrainModel(config)
+    # This loss is pure regularization loss because the initializer matches
+    # the target function and there were 0 training epochs.
+    self.assertAlmostEqual(loss, 0.087, delta=_LOSS_EPS)
+
+if __name__ == "__main__":
+  tf.test.main()
diff --git a/tensorflow_lattice/python/model_info.py b/tensorflow_lattice/python/model_info.py
new file mode 100644
index 0000000..465ea94
--- /dev/null
+++ b/tensorflow_lattice/python/model_info.py
@@ -0,0 +1,109 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Classes defining trained TFL model structure and parameter information.
+
+This package provides representations and tools for analysis of a trained
+TF Lattice model, e.g. a canned estimator in saved model format.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+
+
+class ModelGraph(
+    collections.namedtuple('ModelGraph', ['nodes', 'output_node'])):
+  """Model info and parameters as a graph.
+
+  Note that this is not a TF graph, but rather a graph of python objects that
+  describe model structure and parameters.
+
+  Attributes:
+    nodes: List of all the nodes in the model.
+    output_node: The output node of the model.
+  """
+
+
+class InputFeatureNode(
+    collections.namedtuple('InputFeatureNode',
+                           ['name', 'is_categorical', 'vocabulary_list'])):
+  """Input features to the model.
+
+  Attributes:
+    name: Name of the input feature.
+    is_categorical: If the feature is categorical.
+    vocabulary_list: Category values for categorical features or None.
+  """
+
+
+class PWLCalibrationNode(
+    collections.namedtuple('PWLCalibrationNode', [
+        'input_node', 'input_keypoints', 'output_keypoints', 'default_input',
+        'default_output'
+    ])):
+  """Represents a PWL calibration layer.
+
+  Attributes:
+    input_node: Input node for the calibration.
+    input_keypoints: Input keypoints for PWL calibration.
+    output_keypoints: Output keypoints for PWL calibration.
+    default_input: Default/missing input value or None.
+    default_output: Default/missing output value or None.
+  """
+
+
+class CategoricalCalibrationNode(
+    collections.namedtuple('CategoricalCalibrationNode',
+                           ['input_node', 'output_values', 'default_input'])):
+  """Represents a categorical calibration layer.
+
+  Attributes:
+    input_node: Input node for the calibration.
+    output_values: Output calibration values. If the calibrated feature has
+      default/missing values, the last value will be for default/missing.
+    default_input: Default/missing input value or None.
+  """
+
+
+class LinearNode(
+    collections.namedtuple('LinearNode',
+                           ['input_nodes', 'coefficients', 'bias'])):
+  """Represents a linear layer.
+
+  Attributes:
+    input_nodes: List of input nodes to the linear layer.
+    coefficients: Linear weights.
+    bias: Bias term for the linear layer.
+  """
+
+
+class LatticeNode(
+    collections.namedtuple('LatticeNode', ['input_nodes', 'weights'])):
+  """Represents a lattice layer.
+
+  Attributes:
+    input_nodes: List of input nodes to the lattice layer.
+    weights: Lattice parameters.
+  """
+
+
+class MeanNode(collections.namedtuple('MeanNode', ['input_nodes'])):
+  """Represents an averaging layer.
+
+  Attributes:
+    input_nodes: List of input nodes to the average layer.
+  """
diff --git a/tensorflow_lattice/python/ops/__init__.py b/tensorflow_lattice/python/ops/__init__.py
deleted file mode 100644
index bae50ba..0000000
--- a/tensorflow_lattice/python/ops/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""TensorFlow Lattice operators."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
diff --git a/tensorflow_lattice/python/ops/lattice_ops.py b/tensorflow_lattice/python/ops/lattice_ops.py
deleted file mode 100644
index 181534d..0000000
--- a/tensorflow_lattice/python/ops/lattice_ops.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Lattice interpolation and gradient ops.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -# pylint: disable=unused-import -from tensorflow_lattice.python.ops.gen_lattice_interpolation import hypercube_gradient -from tensorflow_lattice.python.ops.gen_lattice_interpolation import hypercube_interpolation -from tensorflow_lattice.python.ops.gen_lattice_interpolation import simplex_gradient -from tensorflow_lattice.python.ops.gen_lattice_interpolation import simplex_interpolation -# pylint: enable=unused-import - -_lattice_ops = tf.load_op_library( - tf.compat.v1.resource_loader.get_path_to_datafile( - '../../cc/ops/_lattice_ops.so')) - - -@tf.RegisterGradient('HypercubeInterpolation') -def _hypercube_gradient(op, grad_wrt_weight): - """Register gradient for HypercubeInterpolationOp.""" - grad_wrt_input = hypercube_gradient( - input=op.inputs[0], - weight=op.outputs[0], - grad_wrt_weight=grad_wrt_weight, - lattice_sizes=op.get_attr('lattice_sizes')) - return [grad_wrt_input] - - -@tf.RegisterGradient('SimplexInterpolation') -def _simplex_gradient(op, grad_wrt_weight): - """Register gradient for SimplexInterpolationOp.""" - grad_wrt_input = simplex_gradient( - input=op.inputs[0], - weight=op.outputs[0], - grad_wrt_weight=grad_wrt_weight, - lattice_sizes=op.get_attr('lattice_sizes')) - return [grad_wrt_input] - - -def lattice(input_tensor, - parameter_tensor, - lattice_sizes, - interpolation_type='hypercube'): - """Returns an interpolated look-up table (lattice) op. - - Args: - input_tensor: [batch_size, input_dim] tensor. - parameter_tensor: [output_dim, param_dim] tensor, where param_dim == - lattice_sizes[0] * ... * lattice_sizes[input_dim - 1]. - lattice_sizes: A list of lattice sizes of each dimension. - interpolation_type: 'hypercube' or 'simplex'. - - Returns: - output_tensor: [batch_size, num_outputs] tensor that contains the output of - hypercube lattice. - - Raises: - ValueError: If interpolation_type is not 'hypercube' nor 'simplex'. - - - """ - if interpolation_type not in ['hypercube', 'simplex']: - raise ValueError("interpolation_type should be 'hypercube' or 'simplex'") - - if interpolation_type == 'hypercube': - interpolation_weights = hypercube_interpolation( - input_tensor, lattice_sizes=lattice_sizes) - else: - interpolation_weights = simplex_interpolation( - input_tensor, lattice_sizes=lattice_sizes) - - # Now the dimension is [batch_size, num_outputs]. - - output_tensor = tf.matmul( - interpolation_weights, parameter_tensor, transpose_b=True) - - return output_tensor diff --git a/tensorflow_lattice/python/ops/pwl_calibration_ops.py b/tensorflow_lattice/python/ops/pwl_calibration_ops.py deleted file mode 100644 index b98d8c7..0000000 --- a/tensorflow_lattice/python/ops/pwl_calibration_ops.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2017 The TensorFlow Lattice Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Piecewise-linear calibration ops. - -Piecewise-linear calibration works particularly well with lattice models, and -is therefore part of the "TensorFlow Lattice" package. - -But it can be used in conjunction with other types of models as well, in -particular with linear models: it increases its power without breaking -independence of the variables (desirable in some situations). - -This file exports the basic graph operations used for calibrators. See -pwl_calibration_layers.py for more details and higher level calibration -functions, for building models. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import tensorflow as tf - -# pylint: disable=unused-import -from tensorflow_lattice.python.ops.gen_monotonic_projection import monotonic_projection -from tensorflow_lattice.python.ops.gen_pwl_indexing_calibrator import pwl_indexing_calibrator -from tensorflow_lattice.python.ops.gen_pwl_indexing_calibrator import pwl_indexing_calibrator_gradient -from tensorflow_lattice.python.ops.gen_pwl_indexing_calibrator import pwl_indexing_calibrator_sparse -from tensorflow_lattice.python.ops.gen_pwl_indexing_calibrator import pwl_indexing_calibrator_sparse_gradient -# pylint: enable=unused-import - -_pwl_calibration_ops = tf.load_op_library( - tf.compat.v1.resource_loader.get_path_to_datafile( - '../../cc/ops/_pwl_calibration_ops.so')) - - -@tf.RegisterGradient('PwlIndexingCalibrator') -def _pwl_indexing_calibrator_grad(op, grad_wrt_weights): - """Register gradient for PwlIndexingCalibrator.""" - grad_wrt_input, grad_wrt_kp_inputs = pwl_indexing_calibrator_gradient( - input=op.inputs[0], - kp_inputs=op.inputs[1], - grad_wrt_weights=grad_wrt_weights) - return [grad_wrt_input, grad_wrt_kp_inputs] - - -@tf.RegisterGradient('PwlIndexingCalibratorSparse') -def _pwl_indexing_calibrator_sparse_grad(op, unused_grad_wrt_indices, - grad_wrt_weights): - """Register gradient for PwlIndexingCalibratorSparse.""" - # unused_grad_wrt_indices is None and not used. But the optimizers do pass - # the extra parameter, so it needs to be there. - grad_wrt_input, grad_wrt_params = pwl_indexing_calibrator_sparse_gradient( - input=op.inputs[0], - kp_inputs=op.inputs[1], - indices=op.outputs[0], - grad_wrt_weights=grad_wrt_weights) - return [grad_wrt_input, grad_wrt_params] diff --git a/tensorflow_lattice/python/parallel_combination_layer.py b/tensorflow_lattice/python/parallel_combination_layer.py new file mode 100644 index 0000000..1382adb --- /dev/null +++ b/tensorflow_lattice/python/parallel_combination_layer.py @@ -0,0 +1,164 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""ParallelCombination layer for combining several parallel calibration layers.
+
+This layer wraps several calibration layers under a single ParallelCombination
+layer that can be used by a `Sequential` Keras model.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow_lattice.python import categorical_calibration_layer
+from tensorflow_lattice.python import lattice_layer
+from tensorflow_lattice.python import linear_layer
+from tensorflow_lattice.python import pwl_calibration_layer
+
+
+class ParallelCombination(keras.layers.Layer):
+  # pyformat: disable
+  """Wraps several parallel calibration layers under a single one.
+
+  `ParallelCombination` is designed for combining several calibration layers
+  whose outputs go into a single `Lattice` or `Linear` layer, so that
+  calibration layers can be used within a `Sequential` model.
+
+  The difference from `keras.layers.Concatenate` is that the latter operates
+  on already built objects and thus cannot be used to group layers for a
+  `Sequential` model.
+
+  Input shape:
+    `(batch_size, k)` or list of length `k` of shapes: `(batch_size, 1)` where
+    `k` is the number of associated calibration layers.
+
+  Output shape:
+    `(batch_size, k)` or list of length `k` of shapes: `(batch_size, 1)` where
+    `k` is the number of associated calibration layers. Shape of output
+    depends on the `single_output` parameter.
+
+  Attributes:
+    - All `__init__` arguments.
+
+  Example:
+
+  Example usage with a Sequential model:
+
+  ```python
+  model = keras.models.Sequential()
+  combined_calibrators = ParallelCombination()
+  for i in range(num_dims):
+    calibration_layer = PWLCalibration(...)
+    combined_calibrators.append(calibration_layer)
+  model.add(combined_calibrators)
+  model.add(Lattice(...))
+  ```
+  """
+  # pyformat: enable
+
+  def __init__(self, calibration_layers=None, single_output=True, **kwargs):
+    """Initializes an instance of `ParallelCombination`.
+
+    Args:
+      calibration_layers: List of `PWLCalibration` or `CategoricalCalibration`
+        objects or any other layers taking and returning a tensor of shape
+        `(batch_size, 1)`.
+      single_output: If True, returns the output as a single tensor of shape
+        `(batch_size, k)`. Otherwise returns a list of `k` tensors of shape
+        `(batch_size, 1)`.
+      **kwargs: Other args passed to `tf.keras.layers.Layer` initializer.
+    """
+    super(ParallelCombination, self).__init__(**kwargs)
+    self.calibration_layers = []
+    for calibration_layer in calibration_layers or []:
+      if not isinstance(calibration_layer, dict):
+        self.calibration_layers.append(calibration_layer)
+      else:
+        # Keras deserialization logic must have explicit access to all custom
+        # classes. This is the standard way to provide such access.
+        with keras.utils.custom_object_scope({
+            "Lattice":
+                lattice_layer.Lattice,
+            "Linear":
+                linear_layer.Linear,
+            "PWLCalibration":
+                pwl_calibration_layer.PWLCalibration,
+            "CategoricalCalibration":
+                categorical_calibration_layer.CategoricalCalibration,
+        }):
+          self.calibration_layers.append(
+              keras.layers.deserialize(calibration_layer))
+    self.single_output = single_output
+
+  def append(self, calibration_layer):
+    """Appends a new calibration layer to the end."""
+    self.calibration_layers.append(calibration_layer)
+
+  def build(self, input_shape):
+    """Standard Keras build() method."""
+    if isinstance(input_shape, list):
+      if len(input_shape) != len(self.calibration_layers):
+        raise ValueError("Number of ParallelCombination input tensors does not "
+                         "match number of calibration layers. input_shape: %s, "
+                         "layers: %s" % (input_shape, self.calibration_layers))
+      for layer, shape in zip(self.calibration_layers, input_shape):
+        layer.build(shape)
+    else:
+      if input_shape[1] != len(self.calibration_layers):
+        raise ValueError("Second dimension of ParallelCombination input tensor "
+                         "does not match number of calibration layers. "
+                         "input_shape: %s, layers: %s" %
+                         (input_shape, self.calibration_layers))
+      for layer in self.calibration_layers:
+        layer.build(tf.TensorShape([input_shape[0], 1]))
+
+    super(ParallelCombination, self).build(input_shape)
+
+  def call(self, inputs):
+    """Standard Keras call() method."""
+    if not isinstance(inputs, list):
+      if len(inputs.shape) != 2:
+        raise ValueError("'inputs' is expected to have rank 2. "
+                         "Given: %s" % inputs)
+      inputs = tf.split(inputs, axis=1, num_or_size_splits=inputs.shape[1])
+    if len(inputs) != len(self.calibration_layers):
+      raise ValueError("Number of ParallelCombination input tensors does not "
+                       "match number of calibration layers. inputs: %s, "
+                       "layers: %s" % (inputs, self.calibration_layers))
+    outputs = [
+        layer(one_d_input)
+        for layer, one_d_input in zip(self.calibration_layers, inputs)
+    ]
+    if self.single_output:
+      return tf.concat(outputs, axis=1)
+    else:
+      return outputs
+
+  def compute_output_shape(self, input_shape):
+    """Standard Keras compute_output_shape() method."""
+    if self.single_output:
+      return tf.TensorShape([None, len(self.calibration_layers)])
+    else:
+      return [tf.TensorShape([None, 1])] * len(self.calibration_layers)
+
+  def get_config(self):
+    """Standard Keras config for serialization."""
+    config = {
+        "calibration_layers": [keras.layers.serialize(layer)
+                               for layer in self.calibration_layers],
+        "single_output": self.single_output,
+    }  # pyformat: disable
+    config.update(super(ParallelCombination, self).get_config())
+    return config
diff --git a/tensorflow_lattice/python/parallel_combination_test.py b/tensorflow_lattice/python/parallel_combination_test.py
new file mode 100644
index 0000000..a13601c
--- /dev/null
+++ b/tensorflow_lattice/python/parallel_combination_test.py
@@ -0,0 +1,140 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for ParallelCombination layer."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from absl.testing import parameterized
+import numpy as np
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow_lattice.python import lattice_layer as ll
+from tensorflow_lattice.python import parallel_combination_layer as pcl
+
+
+class ParallelCombinationTest(parameterized.TestCase, tf.test.TestCase):
+
+  def setUp(self):
+    super(ParallelCombinationTest, self).setUp()
+    self.disable_all = False
+
+  def testParallelCombinationSingleInput(self):
+    if self.disable_all:
+      return
+    all_calibrators = pcl.ParallelCombination()
+    for i in range(3):
+      # It's not typical to use a 1-d Lattice layer for calibration, but let's
+      # do it to avoid a redundant dependency on the PWLCalibration layer.
+      calibrator = ll.Lattice(
+          lattice_sizes=[2], output_min=0.0, output_max=i + 1.0)
+      all_calibrators.append(calibrator)
+
+    # Given the output range specified below, the linear initializer will make
+    # the lattice simply sum up its inputs.
+    simple_sum = ll.Lattice(
+        lattice_sizes=[5] * 3,
+        kernel_initializer="linear_initializer",
+        output_min=0.0,
+        output_max=12.0,
+        name="SummingLattice")
+    model = keras.models.Sequential()
+    model.add(all_calibrators)
+    model.add(simple_sum)
+
+    test_inputs = np.asarray([
+        [0.0, 0.0, 0.0],
+        [0.1, 0.2, 0.3],
+        [1.0, 1.0, 1.0],
+    ])
+    predictions = model.predict(test_inputs)
+    print("predictions")
+    print(predictions)
+    self.assertTrue(np.allclose(predictions, np.asarray([[0.0], [1.4], [6.0]])))
+
+  def testParallelCombinationMultipleInputs(self):
+    if self.disable_all:
+      return
+    input_layers = [keras.layers.Input(shape=[1]) for _ in range(3)]
+    all_calibrators = pcl.ParallelCombination(single_output=False)
+    for i in range(3):
+      # It's not typical to use a 1-d Lattice layer for calibration, but let's
+      # do it to avoid a redundant dependency on the PWLCalibration layer.
+      calibrator = ll.Lattice(
+          lattice_sizes=[2], output_min=0.0, output_max=i + 1.0)
+      all_calibrators.append(calibrator)
+
+    # Given the output range specified below, the linear initializer will make
+    # the lattice simply sum up its inputs.
+    simple_sum = ll.Lattice(
+        lattice_sizes=[5] * 3,
+        kernel_initializer="linear_initializer",
+        output_min=0.0,
+        output_max=12.0,
+        name="SummingLattice",
+        trainable=False)
+
+    output = simple_sum(all_calibrators(input_layers))
+    model = keras.models.Model(inputs=input_layers, outputs=output)
+
+    test_inputs = [
+        np.asarray([[0.0], [0.1], [1.0]]),
+        np.asarray([[0.0], [0.2], [1.0]]),
+        np.asarray([[0.0], [0.3], [1.0]]),
+    ]
+    predictions = model.predict(test_inputs)
+    print("predictions")
+    print(predictions)
+    self.assertTrue(np.allclose(predictions, np.asarray([[0.0], [1.4], [6.0]])))
+
+  def testParallelCombinationClone(self):
+    if self.disable_all:
+      return
+    input_layers = [keras.layers.Input(shape=[1]) for _ in range(3)]
+    all_calibrators = pcl.ParallelCombination(single_output=False)
+    for i in range(3):
+      # It's not typical to use a 1-d Lattice layer for calibration, but let's
+      # do it to avoid a redundant dependency on the PWLCalibration layer.
+      calibrator = ll.Lattice(
+          lattice_sizes=[2], output_min=0.0, output_max=i + 1.0)
+      all_calibrators.append(calibrator)
+
+    # Given the output range specified below, the linear initializer will make
+    # the lattice simply sum up its inputs.
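+    # As a concrete check of the expected values used below: each calibrator
+    # maps [0, 1] linearly onto [0, i + 1], so inputs [0.1, 0.2, 0.3] become
+    # [0.1, 0.4, 0.9], and the summing lattice outputs 0.1 + 0.4 + 0.9 = 1.4.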
+ simple_sum = ll.Lattice( + lattice_sizes=[5] * 3, + kernel_initializer="linear_initializer", + output_min=0.0, + output_max=12.0, + name="SummingLattice", + trainable=False) + + output = simple_sum(all_calibrators(input_layers)) + model = keras.models.Model(inputs=input_layers, outputs=output) + clone = keras.models.clone_model(model) + + test_inputs = [ + np.asarray([[0.0], [0.1], [1.0]]), + np.asarray([[0.0], [0.2], [1.0]]), + np.asarray([[0.0], [0.3], [1.0]]), + ] + predictions = clone.predict(test_inputs) + print("predictions") + print(predictions) + self.assertTrue(np.allclose(predictions, np.asarray([[0.0], [1.4], [6.0]]))) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensorflow_lattice/python/pwl_calibration_layer.py b/tensorflow_lattice/python/pwl_calibration_layer.py new file mode 100644 index 0000000..b02806f --- /dev/null +++ b/tensorflow_lattice/python/pwl_calibration_layer.py @@ -0,0 +1,966 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Piecewise linear calibration layer. + +Keras implementation of tensorflow lattice pwl calibration layer. Layer takes +single or multi-dimensional input and transforms it using piecewise linear +functions following monotonicity, convexity/concavity and bounds constraints if +specified. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from . import pwl_calibration_lib + +from absl import logging +import numpy as np +import six +import tensorflow as tf +from tensorflow import keras + +INTERPOLATION_KEYPOINTS_NAME = "interpolation_keypoints" +LENGTHS_NAME = "lengths" +MISSING_INPUT_VALUE_NAME = "missing_input_value" +PWL_CALIBRATION_KERNEL_NAME = "pwl_calibration_kernel" +PWL_CALIBRATION_MISSING_OUTPUT_NAME = "pwl_calibration_missing_output" + + +class PWLCalibration(keras.layers.Layer): + # pyformat: disable + """Piecewise linear calibration layer. + + Layer takes input of shape `(batch_size, units)` or `(batch_size, 1)` and + transforms it using `units` number of piecewise linear functions following + monotonicity, convexity and bounds constraints if specified. If multi + dimensional input is provides, each output will be for the corresponding + input, otherwise all PWL functions will act on the same input. All units share + the same layer configuration, but each has their separate set of trained + parameters. + + See `tfl.parallel_combination.ParallelCombination` layer for using + PWLCalibration layer within Sequential Keras models. + + Input shape: + Single input should be a rank-2 tensor with shape: `(batch_size, units)` or + `(batch_size, 1)`. The input can also be a list of two tensors of the same + shape where the first tensor is the regular input tensor and the second is the + `is_missing` tensor. In the `is_missing` tensor, 1.0 represents missing input + and 0.0 represents available input. + + Output shape: + Rank-2 tensor with shape: `(batch_size, units)`. + + Attributes: + - All `__init__` arguments. 
+ kernel: TF variable which stores weights of piecewise linear function. + missing_output: TF variable which stores output learned for missing input. + Or TF Constant which stores `missing_output_value` if one is provided. + Available only if `impute_missing` is True. + + Example: + + ```python + calibrator = tfl.pwl_calibration_layer.PWLCalibration( + # Key-points of piecewise-linear function. + input_keypoints=np.linspace(1., 4., num=4), + # Output can be bounded, e.g. when this layer feeds into a lattice. + output_min=0.0, + output_max=2.0, + # You can specify monotonicity and other shape constraints for the layer. + monotonicity='increasing', + # You can specify TFL regularizers as tuple ('regularizer name', l1, l2). + # You can also pass any keras Regularizer object. + kernel_regularizer=('hessian', 0.0, 1e-4), + ) + ``` + """ + # pyformat: enable + + def __init__(self, + input_keypoints, + units=1, + output_min=None, + output_max=None, + clamp_min=False, + clamp_max=False, + monotonicity="none", + convexity="none", + is_cyclic=False, + kernel_initializer="equal_heights", + kernel_regularizer=None, + impute_missing=False, + missing_input_value=None, + missing_output_value=None, + num_projection_iterations=8, + **kwargs): + # pyformat: disable + """Initializes an instance of `PWLCalibration`. + + Args: + input_keypoints: Ordered list of keypoints of piecewise linear function. + Can be anything accepted by tf.convert_to_tensor(). + units: Output dimension of the layer. See class comments for details. + output_min: Minimum output of calibrator. + output_max: Maximum output of calibrator. + clamp_min: For monotonic calibrators ensures that output_min is reached. + clamp_max: For monotonic calibrators ensures that output_max is reached. + monotonicity: Constraints piecewise linear function to be monotonic using + 'increasing' or 1 to indicate increasing monotonicity, 'decreasing' or + -1 to indicate decreasing monotonicity and 'none' or 0 to indicate no + monotonicity constraints. + convexity: Constraints piecewise linear function to be convex or concave. + Convexity is indicated by 'convex' or 1, concavity is indicated by + 'concave' or -1, 'none' or 0 indicates no convexity/concavity + constraints. + Concavity together with increasing monotonicity as well as convexity + together with decreasing monotonicity results in diminishing return + constraints. + Consider increasing the value of `num_projection_iterations` if + convexity is specified, especially with larger number of keypoints. + is_cyclic: Whether the output for last keypoint should be identical to + output for first keypoint. This is useful for features such as + "time of day" or "degree of turn". If inputs are discrete and exactly + match keypoints then is_cyclic will have an effect only if TFL + regularizers are being used. + kernel_initializer: None or one of: + - String `"equal_heights"`: For pieces of pwl function to have equal + heights. + - String `"equal_slopes"`: For pieces of pwl function to have equal + slopes. + - Any Keras initializer object. If you are passing such object make sure + that you know how layer stores its data. + kernel_regularizer: None or single element or list of following: + - Tuple `("laplacian", l1, l2)` where `l1` and `l2` are floats which + represent corresponding regularization amount for Laplacian + regularizer. It penalizes the first derivative to make the function + more constant. See `tfl.pwl_calibration.LaplacianRegularizer` for more + details. 
+ - Tuple `("hessian", l1, l2)` where `l1` and `l2` are floats which + represent corresponding regularization amount for Hessian regularizer. + It penalizes the second derivative to make the function more linear. + See `tfl.pwl_calibration.HessianRegularizer` for more details. + - Tuple `("wrinkle", l1, l2)` where `l1` and `l2` are floats which + represent corresponding regularization amount for wrinkle regularizer. + It penalizes the third derivative to make the function more smooth. + See 'tfl.pwl_calibration.WrinkleRegularizer` for more details. + - Any Keras regularizer object. + impute_missing: Whether to learn an output for cases where input data is + missing. If set to True, either `missing_input_value` should be + initialized, or the `call()` method should get pair of tensors. See + class input shape description for more details. + missing_input_value: If set, all inputs which are equal to this value will + be considered as missing. Can not be set if `impute_missing` is False. + missing_output_value: If set, instead of learning output for missing + inputs, simply maps them into this value. Can not be set if + `impute_missing` is False. + num_projection_iterations: Number of iterations of the Dykstra's + projection algorithm. Constraints are strictly satisfied at the end of + each update, but the update will be closer to a true L2 projection with + higher number of iterations. See + `tfl.pwl_calibration_lib.project_all_constraints` for more details. + **kwargs: Other args passed to `tf.keras.layers.Layer` initializer. + + Raises: + ValueError: If layer hyperparameters are invalid. + """ + # pyformat: enable + super(PWLCalibration, self).__init__(**kwargs) + + pwl_calibration_lib.verify_hyperparameters( + input_keypoints=input_keypoints, + output_min=output_min, + output_max=output_max, + monotonicity=monotonicity, + convexity=convexity, + is_cyclic=is_cyclic) + if missing_input_value is not None and not impute_missing: + raise ValueError("'missing_input_value' is specified, but " + "'impute_missing' is set to False. " + "'missing_input_value': " + str(missing_input_value)) + if missing_output_value is not None and not impute_missing: + raise ValueError("'missing_output_value' is specified, but " + "'impute_missing' is set to False. " + "'missing_output_value': " + str(missing_output_value)) + if input_keypoints is None: + raise ValueError("'input_keypoints' can't be None") + if monotonicity is None: + raise ValueError("'monotonicity' can't be None. 
Did you mean '0'?") + + self.input_keypoints = input_keypoints + self.units = units + self.output_min = output_min + self.output_max = output_max + self.clamp_min = clamp_min + self.clamp_max = clamp_max + (self._output_init_min, self._output_init_max, self._output_min_constraints, + self._output_max_constraints + ) = pwl_calibration_lib.convert_all_constraints(self.output_min, + self.output_max, + self.clamp_min, + self.clamp_max) + + self.monotonicity = monotonicity + self.convexity = convexity + self.is_cyclic = is_cyclic + + if kernel_initializer == "equal_heights": + self.kernel_initializer = UniformOutputInitializer( + output_min=self._output_init_min, + output_max=self._output_init_max, + monotonicity=self.monotonicity) + elif kernel_initializer == "equal_slopes": + self.kernel_initializer = UniformOutputInitializer( + output_min=self._output_init_min, + output_max=self._output_init_max, + monotonicity=self.monotonicity, + keypoints=self.input_keypoints) + else: + # Keras deserialization logic must have explicit acceess to all custom + # classes. This is standard way to provide such access. + with keras.utils.custom_object_scope({ + "UniformOutputInitializer": UniformOutputInitializer, + }): + self.kernel_initializer = keras.initializers.get(kernel_initializer) + + self.kernel_regularizer = [] + if kernel_regularizer: + if (callable(kernel_regularizer) or + (isinstance(kernel_regularizer, tuple) and + isinstance(kernel_regularizer[0], six.string_types))): + kernel_regularizer = [kernel_regularizer] + + for reg in kernel_regularizer: + if isinstance(reg, tuple): + (name, l1, l2) = reg + if name.lower() == "laplacian": + self.kernel_regularizer.append( + LaplacianRegularizer(l1=l1, l2=l2, is_cyclic=self.is_cyclic)) + elif name.lower() == "hessian": + self.kernel_regularizer.append( + HessianRegularizer(l1=l1, l2=l2, is_cyclic=self.is_cyclic)) + elif name.lower() == "wrinkle": + self.kernel_regularizer.append( + WrinkleRegularizer(l1=l1, l2=l2, is_cyclic=self.is_cyclic)) + else: + raise ValueError("Unknown custom lattice regularizer: %s" % reg) + else: + # This is needed for Keras deserialization logic to be aware of our + # custom objects. + with keras.utils.custom_object_scope({ + "LaplacianRegularizer": LaplacianRegularizer, + "HessianRegularizer": HessianRegularizer, + "WrinkleRegularizer": WrinkleRegularizer, + }): + self.kernel_regularizer.append(keras.regularizers.get(reg)) + + self.impute_missing = impute_missing + self.missing_input_value = missing_input_value + self.missing_output_value = missing_output_value + self.num_projection_iterations = num_projection_iterations + + def build(self, input_shape): + """Standard Keras build() method.""" + input_keypoints = np.array(self.input_keypoints) + # Don't need last keypoint for interpolation because we need only beginnings + # of intervals. 
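+    # E.g. input_keypoints [0.0, 0.5, 1.0] yield interpolation keypoints
+    # [0.0, 0.5] and lengths [0.5, 0.5].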
+ self._interpolation_keypoints = tf.constant( + input_keypoints[:-1], + dtype=self.dtype, + name=INTERPOLATION_KEYPOINTS_NAME) + self._lengths = tf.constant( + input_keypoints[1:] - input_keypoints[:-1], + dtype=self.dtype, + name=LENGTHS_NAME) + + constraints = PWLCalibrationConstraints( + monotonicity=self.monotonicity, + convexity=self.convexity, + lengths=self._lengths, + output_min=self.output_min, + output_max=self.output_max, + output_min_constraints=self._output_min_constraints, + output_max_constraints=self._output_max_constraints, + num_projection_iterations=self.num_projection_iterations) + + if not self.kernel_regularizer: + kernel_reg = None + elif len(self.kernel_regularizer) == 1: + kernel_reg = self.kernel_regularizer[0] + else: + # Keras interface assumes only one regularizer, so summ all regularization + # losses which we have. + kernel_reg = lambda x: tf.add_n([r(x) for r in self.kernel_regularizer]) + + # If 'is_cyclic' is specified - last weight will be computed from previous + # weights in order to connect last keypoint with first. + num_weights = input_keypoints.size - self.is_cyclic + + # PWL calibration layer kernel is units-column matrix. First row of matrix + # represents bias. All remaining represent delta in y-value compare to + # previous point. Aka heights of segments. + self.kernel = self.add_weight( + PWL_CALIBRATION_KERNEL_NAME, + shape=[num_weights, self.units], + initializer=self.kernel_initializer, + regularizer=kernel_reg, + constraint=constraints, + dtype=self.dtype) + + if self.kernel_regularizer and not tf.executing_eagerly(): + # Keras has its own mechanism to handle regularization losses which + # does not use GraphKeys, but we want to also add losses to graph keys so + # they are easily accessable when layer is being used outside of Keras. + # Adding losses to GraphKeys will not interfer with Keras. + for reg in self.kernel_regularizer: + tf.compat.v1.add_to_collection( + tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES, reg(self.kernel)) + + if self.impute_missing: + if self.missing_input_value is not None: + self._missing_input_value_tensor = tf.constant( + self.missing_input_value, + dtype=self.dtype, + name=MISSING_INPUT_VALUE_NAME) + else: + self._missing_input_value_tensor = None + + if self.missing_output_value is not None: + self.missing_output = tf.constant( + self.missing_output_value, shape=[1, self.units], dtype=self.dtype) + else: + missing_init = (self._output_init_min + self._output_init_max) / 2.0 + missing_constraints = NaiveBoundsConstraints( + lower_bound=self.output_min, upper_bound=self.output_max) + self.missing_output = self.add_weight( + PWL_CALIBRATION_MISSING_OUTPUT_NAME, + shape=[1, self.units], + initializer=keras.initializers.Constant(value=missing_init), + constraint=missing_constraints, + dtype=self.dtype) + + super(PWLCalibration, self).build(input_shape) + + def call(self, inputs): + """Standard Keras call() method.. + + Args: + inputs: Either input tensor or list of 2 elements: input tensor and + `is_missing` tensor. + + Returns: + Calibrated input tensor. + + Raises: + ValueError: If `is_missing` tensor specified incorrectly. + """ + is_missing = None + if isinstance(inputs, list): + # Only 2 element lists are allowed. When such list is given - second + # element represents 'is_missing' tensor encoded as float value. 
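+      # E.g. inputs = [input_tensor, is_missing] where is_missing[i][j] == 1.0
+      # marks input_tensor[i][j] as missing and 0.0 marks it as present.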
+ if not self.impute_missing: + raise ValueError("Multiple inputs for PWLCalibration layer assume " + "regular input tensor and 'is_missing' tensor, but " + "this instance of a layer is not configured to handle " + "missing value. See 'impute_missing' parameter.") + if len(inputs) > 2: + raise ValueError("Multiple inputs for PWLCalibration layer assume " + "normal input tensor and 'is_missing' tensor, but more" + " than 2 tensors given. 'inputs': " + str(inputs)) + if len(inputs) == 2: + inputs, is_missing = inputs + if is_missing.shape.as_list() != inputs.shape.as_list(): + raise ValueError( + "is_missing shape %s does not match inputs shape %s for " + "PWLCalibration layer" % + (str(is_missing.shape), str(inputs.shape))) + else: + [inputs] = inputs + if len(inputs.shape) != 2 or (inputs.shape[1] != self.units and + inputs.shape[1] != 1): + raise ValueError("Shape of input tensor for PWLCalibration layer must be " + "[-1, units] or [-1, 1]. It is: " + str(inputs.shape)) + + if inputs.dtype != self._interpolation_keypoints.dtype: + raise ValueError("dtype(%s) of input to PWLCalibration layer does not " + "correspond to dtype(%s) of keypoints. You can enforce " + "dtype of keypoints be explicitly providing 'dtype' " + "parameter to layer constructor or by passing keypoints " + "in such format which by default will be converted into " + "desired one." % + (inputs.dtype, self._interpolation_keypoints.dtype)) + + # Here is calibration. Everything else is handling of missing. + if inputs.shape[1] > 1: + # Add dimension to multi dim input to get shape [batch_size, units, 1]. + # Interpolation will have shape [batch_size, units, weights]. + inputs_to_calibration = tf.expand_dims(inputs, -1) + else: + inputs_to_calibration = inputs + interpolation_weights = pwl_calibration_lib.compute_interpolation_weights( + inputs_to_calibration, self._interpolation_keypoints, self._lengths) + if self.is_cyclic: + # Need to add such last height to make all heights to sum up to 0.0 in + # order to make calibrator cyclic. + bias_and_heights = tf.concat( + [self.kernel, -tf.reduce_sum(self.kernel[1:], axis=0, keepdims=True)], + axis=0) + else: + bias_and_heights = self.kernel + + # bias_and_heights has shape [weight, units]. + if inputs.shape[1] > 1: + # Multi dim input has interpolation shape [batch_size, units, weights]. + result = tf.reduce_sum( + interpolation_weights * tf.transpose(bias_and_heights), axis=-1) + else: + # Single dim input has interpolation shape [batch_size, weights]. 
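+      # (batch_size, weights) matmul (weights, units) -> (batch_size, units).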
+      result = tf.matmul(interpolation_weights, bias_and_heights)
+
+    if self.impute_missing:
+      if is_missing is None:
+        if self.missing_input_value is None:
+          raise ValueError("PWLCalibration layer is configured to impute "
+                           "missing but no 'missing_input_value' specified and "
+                           "'is_missing' tensor is not given.")
+        assert self._missing_input_value_tensor is not None
+        is_missing = tf.cast(
+            tf.equal(inputs, self._missing_input_value_tensor),
+            dtype=self.dtype)
+      result = is_missing * self.missing_output + (1.0 - is_missing) * result
+    return result
+
+  def compute_output_shape(self, input_shape):
+    """Standard Keras compute_output_shape() method."""
+    del input_shape
+    return [None, self.units]
+
+  def get_config(self):
+    """Standard Keras config for serialization."""
+    config = {
+        "input_keypoints": self.input_keypoints,
+        "units": self.units,
+        "output_min": self.output_min,
+        "output_max": self.output_max,
+        "clamp_min": self.clamp_min,
+        "clamp_max": self.clamp_max,
+        "monotonicity": self.monotonicity,
+        "convexity": self.convexity,
+        "is_cyclic": self.is_cyclic,
+        "kernel_initializer":
+            keras.initializers.serialize(self.kernel_initializer),
+        "kernel_regularizer":
+            [keras.regularizers.serialize(r) for r in self.kernel_regularizer],
+        "impute_missing": self.impute_missing,
+        "missing_input_value": self.missing_input_value,
+        "missing_output_value": self.missing_output_value,
+        "num_projection_iterations": self.num_projection_iterations,
+    }  # pyformat: disable
+    config.update(super(PWLCalibration, self).get_config())
+    return config
+
+  def assert_constraints(self, eps=1e-6):
+    """Asserts that layer weights satisfy all constraints.
+
+    In graph mode builds and returns a list of assertion ops. Note that the ops
+    will be created at the moment when this function is called.
+    In eager mode directly executes the assertions.
+
+    Args:
+      eps: Allowed constraints violation.
+
+    Returns:
+      List of assertion ops in graph mode or immediately asserts in eager mode.
+    """
+    # Assert by computing outputs for keypoints and testing them against
+    # constraints.
+    test_inputs = tf.constant(
+        value=self.input_keypoints,
+        dtype=self.dtype,
+        shape=[len(self.input_keypoints), 1])
+    outputs = self.call(test_inputs)
+
+    asserts = pwl_calibration_lib.assert_constraints(
+        outputs=outputs,
+        monotonicity=pwl_calibration_lib.canonicalize_monotonicity(
+            self.monotonicity),
+        output_min=self.output_min,
+        output_max=self.output_max,
+        clamp_min=self.clamp_min,
+        clamp_max=self.clamp_max,
+        debug_tensors=["weights:", self.kernel],
+        eps=eps)
+
+    if self.impute_missing and self.missing_output_value is None:
+      asserts.append(
+          pwl_calibration_lib.assert_constraints(
+              outputs=self.missing_output,
+              monotonicity=0,
+              output_min=self.output_min,
+              output_max=self.output_max,
+              clamp_min=False,
+              clamp_max=False,
+              debug_tensors=["Imputed missing value:", self.missing_output],
+              eps=eps))
+    return asserts
+
+  def keypoints_outputs(self):
+    """Returns tensor which corresponds to outputs of layer for keypoints."""
+    kp_outputs = tf.cumsum(self.kernel)
+    if self.is_cyclic:
+      kp_outputs = tf.concat([kp_outputs, kp_outputs[0:1]], axis=0)
+    return kp_outputs
+
+
+class UniformOutputInitializer(keras.initializers.Initializer):
+  # pyformat: disable
+  """Initializes PWL calibration layer to represent linear function.
+
+  PWL calibration layer weights are a one-d tensor per unit. The first element
+  of the tensor represents the bias. All remaining elements represent the delta
+  in y-value compared to the previous point, i.e. the heights of the segments.
+
+  Attributes:
+    - All `__init__` arguments.
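+
+  Example:
+
+  A minimal sketch of passing this initializer to a `PWLCalibration` layer
+  explicitly (the `"equal_heights"` shortcut of `PWLCalibration` constructs a
+  similar initializer internally):
+
+  ```python
+  calibrator = tfl.pwl_calibration_layer.PWLCalibration(
+      input_keypoints=np.linspace(0.0, 1.0, num=5),
+      output_min=0.0,
+      output_max=1.0,
+      kernel_initializer=tfl.pwl_calibration_layer.UniformOutputInitializer(
+          output_min=0.0, output_max=1.0, monotonicity='increasing'))
+  ```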
+ """ + # pyformat: enable + + def __init__(self, output_min, output_max, monotonicity, keypoints=None): + # pyformat: disable + """Initializes an instance of `UniformOutputInitializer`. + + Args: + output_min: Minimum value of PWL calibration output after initialization. + output_max: Maximum value of PWL calibration output after initialization. + monotonicity: + - if 'none' or 'increasing', the returned function will go from + `(input_min, output_min)` to `(input_max, output_max)`. + - if 'decreasing', the returned function will go from + `(input_min, output_max)` to `(input_max, output_min)`. + keypoints: + - if not provided (None or []), all pieces of returned function + will have equal heights (i.e. `y[i+1] - y[i]` is constant). + - if provided, all pieces of returned function will have equal slopes + (i.e. `(y[i+1] - y[i]) / (x[i+1] - x[i])` is constant). + """ + # pyformat: enable + pwl_calibration_lib.verify_hyperparameters( + input_keypoints=keypoints, + output_min=output_min, + output_max=output_max, + monotonicity=monotonicity) + self.output_min = output_min + self.output_max = output_max + self.monotonicity = monotonicity + self.keypoints = keypoints + + def __call__(self, shape, dtype=None, partition_info=None): + """Returns weights of PWL calibration layer. + + Args: + shape: Must be rank-2 tensor with of shape `(k, units)` where `k >= 2`. + dtype: Standard Keras initializer param. + partition_info: Standard Keras initializer param. + + Returns: + Weights of PWL calibration layer. + + Raises: + ValueError: If requested shape is invalid for PWL calibration layer + weights. + """ + return pwl_calibration_lib.linear_initializer( + shape=shape, + output_min=self.output_min, + output_max=self.output_max, + monotonicity=pwl_calibration_lib.canonicalize_monotonicity( + self.monotonicity), + keypoints=self.keypoints, + dtype=dtype) + + def get_config(self): + """Standard Keras config for serialization.""" + return { + "output_min": self.output_min, + "output_max": self.output_max, + "monotonicity": self.monotonicity, + "keypoints": self.keypoints, + } # pyformat: disable + + +class PWLCalibrationConstraints(keras.constraints.Constraint): + # pyformat: disable + """Monotonicity and bounds constraints for PWL calibration layer. + + Applies an approximate L2 projection to the weights of a PWLCalibration layer + such that the result satisfies the specified constraints. + + Attributes: + - All `__init__` arguments. + """ + # pyformat: enable + + def __init__( + self, + monotonicity="none", + convexity="none", + lengths=None, + output_min=None, + output_max=None, + output_min_constraints=pwl_calibration_lib.BoundConstraintsType.NONE, + output_max_constraints=pwl_calibration_lib.BoundConstraintsType.NONE, + num_projection_iterations=8): + """Initializes an instance of `PWLCalibration`. + + Args: + monotonicity: Same meaning as corresponding parameter of `PWLCalibration`. + convexity: Same meaning as corresponding parameter of `PWLCalibration`. + lengths: Lengths of pieces of piecewise linear function. Needed only if + convexity is specified. + output_min: Minimum possible output of pwl function. + output_max: Maximum possible output of pwl function. + output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType` + describing the constraints on the layer's minimum value. + output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType` + describing the constraints on the layer's maximum value. 
+ num_projection_iterations: Same meaning as corresponding parameter of + `PWLCalibration`. + """ + pwl_calibration_lib.verify_hyperparameters( + output_min=output_min, + output_max=output_max, + monotonicity=monotonicity, + convexity=convexity, + lengths=lengths) + self.monotonicity = monotonicity + self.convexity = convexity + self.lengths = lengths + self.output_min = output_min + self.output_max = output_max + self.output_min_constraints = output_min_constraints + self.output_max_constraints = output_max_constraints + self.num_projection_iterations = num_projection_iterations + + canonical_convexity = pwl_calibration_lib.canonicalize_convexity( + self.convexity) + canonical_monotonicity = pwl_calibration_lib.canonicalize_monotonicity( + self.monotonicity) + if (canonical_convexity != 0 and canonical_monotonicity == 0 and + (output_min_constraints != pwl_calibration_lib.BoundConstraintsType.NONE + or output_max_constraints != + pwl_calibration_lib.BoundConstraintsType.NONE)): + logging.warning("Convexity constraints are specified with bounds " + "constraints, but without monotonicity. Such combination " + "might lead to convexity being slightly violated. " + "Consider increasing num_projection_iterations to " + "reduce violation.") + + def __call__(self, w): + """Applies constraints to w.""" + return pwl_calibration_lib.project_all_constraints( + weights=w, + monotonicity=pwl_calibration_lib.canonicalize_monotonicity( + self.monotonicity), + output_min=self.output_min, + output_max=self.output_max, + output_min_constraints=self.output_min_constraints, + output_max_constraints=self.output_max_constraints, + convexity=pwl_calibration_lib.canonicalize_convexity( + self.convexity), + lengths=self.lengths, + num_projection_iterations=self.num_projection_iterations) + + def get_config(self): + """Standard Keras config for serialization.""" + return { + "monotonicity": self.monotonicity, + "output_min": self.output_min, + "output_max": self.output_max, + "output_min_constraints": self.output_min_constraints, + "output_max_constraints": self.output_max_constraints, + "convexity": self.convexity, + "lengths": self.lengths, + "num_projection_iterations": self.num_projection_iterations, + } # pyformat: disable + + +class NaiveBoundsConstraints(keras.constraints.Constraint): + # pyformat: disable + """Naively clips all elements of tensor to be within bounds. + + This constraint is used only for the weight tensor for missing output value. + + Attributes: + - All `__init__` arguments. + """ + # pyformat: enable + + def __init__(self, lower_bound=None, upper_bound=None): + """Initializes an instance of `NaiveBoundsConstraints`. + + Args: + lower_bound: Lower bound to clip variable values to. + upper_bound: Upper bound to clip variable values to. + """ + self.lower_bound = lower_bound + self.upper_bound = upper_bound + + def __call__(self, w): + """Applies constraints to w.""" + if self.lower_bound is not None: + w = tf.maximum(w, self.lower_bound) + if self.upper_bound is not None: + w = tf.minimum(w, self.upper_bound) + return w + + def get_config(self): + """Standard Keras config for serialization.""" + return { + "lower_bound": self.lower_bound, + "upper_bound": self.upper_bound + } # pyformat: disable + + +class LaplacianRegularizer(keras.regularizers.Regularizer): + # pyformat: disable + """Laplacian regularizer for PWL calibration layer. + + Calibrator Laplacian regularization penalizes the change in the calibration + output. 
It is defined to be: + + `l1 * ||delta||_1 + l2 * ||delta||_2^2` + + where `delta` is: + + `output_keypoints[1:end] - output_keypoints[0:end-1]`. + + Attributes: + - All `__init__` arguments. + """ + # pyformat: enable + + def __init__(self, l1=0.0, l2=0.0, is_cyclic=False): + """Initializes an instance of `LaplacianRegularizer`. + + Args: + l1: l1 regularization amount as float. + l2: l2 regularization amount as float. + is_cyclic: Whether the first and last keypoints should take the same + output value. + """ + self.l1 = l1 + self.l2 = l2 + self.is_cyclic = is_cyclic + + def __call__(self, x): + """Returns regularization loss. + + Args: + x: Tensor of shape: `(k, units)` which represents weights of PWL + calibration layer. First row of weights is bias term. All remaining + represent delta in y-value compare to previous point (segment heights). + """ + if not self.l1 and not self.l2: + return tf.constant(0.0, dtype=x.dtype, shape=()) + heights = x[1:] + if self.is_cyclic: + # Need to add such last height to make all heights to sum up to 0.0 in + # order to make calibrator cyclic. + heights = tf.concat( + [heights, -tf.reduce_sum(heights, axis=0, keepdims=True)], axis=0) + + losses = [] + if self.l1: + losses.append(self.l1 * tf.reduce_sum(tf.abs(heights))) + if self.l2: + losses.append(self.l2 * tf.reduce_sum(tf.square(heights))) + + result = losses[0] + if len(losses) == 2: + result += losses[1] + return result + + def get_config(self): + """Standard Keras config for serialization.""" + return { + "l1": self.l1, + "l2": self.l2, + "is_cyclic": self.is_cyclic, + } # pyformat: disable + + +class HessianRegularizer(keras.regularizers.Regularizer): + # pyformat: disable + """Hessian regularizer for PWL calibration layer. + + Calibrator hessian regularizer penalizes the change in slopes of linear + pieces. It is define to be: + + `l1 * ||nonlinearity||_1 + l2 * ||nonlinearity||_2^2` + + where `nonlinearity` is: + + `2 * output_keypoints[1:end-1] - output_keypoints[0:end-2] + - output_keypoints[2:end]`. + + This regularizer is zero when the output_keypoints form a linear function of + the index (and not necessarily linear in input values, e.g. when using + non-uniform input keypoints). + + Attributes: + - All `__init__` arguments. + """ + # pyformat: enable + + def __init__(self, l1=0.0, l2=0.0, is_cyclic=False): + """Initializes an instance of `HessianRegularizer`. + + Args: + l1: l1 regularization amount as float. + l2: l2 regularization amount as float. + is_cyclic: Whether the first and last keypoints should take the same + output value. + """ + self.l1 = l1 + self.l2 = l2 + self.is_cyclic = is_cyclic + + def __call__(self, x): + """Returns regularization loss. + + Args: + x: Tensor of shape: `(k, units)` which represents weights of PWL + calibration layer. First row of weights is bias term. All remaining + represent delta in y-value compare to previous point (segment heights). 
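+
+    Returns:
+      Scalar regularization loss as a tensor of the same dtype as `x`.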
+ """ + if not self.l1 and not self.l2: + return tf.constant(0.0, dtype=x.dtype, shape=()) + + if self.is_cyclic: + heights = x[1:] + heights = tf.concat( + [ + heights, + -tf.reduce_sum(heights, axis=0, keepdims=True), + heights[0:1], + ], + axis=0, + ) + nonlinearity = heights[1:] - heights[:-1] + else: + nonlinearity = x[2:] - x[1:-1] + + losses = [] + if self.l1: + losses.append(self.l1 * tf.reduce_sum(tf.abs(nonlinearity))) + if self.l2: + losses.append(self.l2 * tf.reduce_sum(tf.square(nonlinearity))) + + result = losses[0] + if len(losses) == 2: + result += losses[1] + return result + + def get_config(self): + """Standard Keras config for serialization.""" + return { + "l1": self.l1, + "l2": self.l2, + "is_cyclic": self.is_cyclic, + } # pyformat: disable + + +class WrinkleRegularizer(keras.regularizers.Regularizer): + # pyformat: disable + """Wrinkle regularizer for PWL calibration layer. + + Calibrator wrinkle regularization penalizes the change in the second + derivative. It is defined to be: + + `l1 * ||third_derivative||_1 + l2 * ||third_derivative||_2^2` + + where `third_derivative` is: + + `3 * output_keypoints[1:end-2] - 3 * output_keypoints[2:end-1] + - output_keypoints[0:end-3] + output_keypoints[3:end]`. + + This regularizer is zero when the output_keypoints form a 2nd order polynomial + of the index (and not necessarily in input values, e.g. when using + non-uniform input keypoints). + + Attributes: + - All `__init__` arguments. + """ + # pyformat: enable + + def __init__(self, l1=0.0, l2=0.0, is_cyclic=False): + """Initializes an instance of `WrinkleRegularizer`. + + Args: + l1: l1 regularization amount as float. + l2: l2 regularization amount as float. + is_cyclic: Whether the first and last keypoints should take the same + output value. + """ + self.l1 = l1 + self.l2 = l2 + self.is_cyclic = is_cyclic + + def __call__(self, x): + """Returns regularization loss. + + Args: + x: Tensor of shape: `(k, units)` which represents weights of PWL + calibration layer. First row of weights is bias term. All remaining + represent delta in y-value compare to previous point (segment heights). + """ + if not self.l1 and not self.l2: + return tf.constant(0.0, dtype=x.dtype, shape=()) + if x.shape[0] < 3: + return tf.constant(0.0, dtype=x.dtype, shape=()) + + if self.is_cyclic: + heights = x[1:] + heights = tf.concat( + [ + heights, + -tf.reduce_sum(heights, axis=0, keepdims=True), + heights[0:1], + heights[1:2], + ], + axis=0, + ) + nonlinearity = heights[1:] - heights[:-1] + else: + nonlinearity = x[2:] - x[1:-1] + wrinkleness = nonlinearity[1:] - nonlinearity[0:-1] + + losses = [] + if self.l1: + losses.append(self.l1 * tf.reduce_sum(tf.abs(wrinkleness))) + if self.l2: + losses.append(self.l2 * tf.reduce_sum(tf.square(wrinkleness))) + + result = losses[0] + if len(losses) == 2: + result += losses[1] + return result + + def get_config(self): + """Standard Keras config for serialization.""" + return { + "l1": self.l1, + "l2": self.l2, + "is_cyclic": self.is_cyclic, + } # pyformat: disable diff --git a/tensorflow_lattice/python/pwl_calibration_lib.py b/tensorflow_lattice/python/pwl_calibration_lib.py new file mode 100644 index 0000000..a3eff70 --- /dev/null +++ b/tensorflow_lattice/python/pwl_calibration_lib.py @@ -0,0 +1,998 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Implementation of algorithms required for PWL calibration layer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import copy +from enum import Enum +import six +import tensorflow as tf + + +class BoundConstraintsType(Enum): + """Type of bound constraints for PWL calibration. + + - NONE: no constraints. + - BOUND: output range can be anywhere within bounds. + - CLAMPED: output range must exactly match bounds. + """ + NONE = 0 + BOUND = 1 + CLAMPED = 2 + + +def convert_all_constraints(output_min, output_max, clamp_min, clamp_max): + """Converts parameters of PWL calibration layer to internal format. + + Args: + output_min: None for unconstrained bound or some numeric value. + output_max: None for unconstrained bound or some numeric value. + clamp_min: Whether to clamp pwl calibrator to value if `output_min` is not + None. + clamp_max: Whether to clamp pwl calibrator to value if `output_max` is not + None. + + Returns: + "value" as float and appropriate value of + `tfl.pwl_calibration_lib.BoundConstraintsType` enum which corresponds to + `output_min(max)` and `clamp_min(max)`. + """ + if output_min is None: + output_max, output_max_constraints = _convert_constraints( + output_max, clamp_max) + output_min = output_max + output_min_constraints = BoundConstraintsType.NONE + elif output_max is None: + output_min, output_min_constraints = _convert_constraints( + output_min, clamp_min) + output_max = output_min + output_max_constraints = BoundConstraintsType.NONE + else: + output_min, output_min_constraints = _convert_constraints( + output_min, clamp_min) + output_max, output_max_constraints = _convert_constraints( + output_max, clamp_max) + return output_min, output_max, output_min_constraints, output_max_constraints + + +def _convert_constraints(value, clamp_to_value): + """Converts constraints for output_min/max to internal format. + + Args: + value: None for unconstrained bound or some numeric value. + clamp_to_value: Whether to clamp pwl calibrator to value if value isn't None + + Returns: + "value" as float and appropriate value of + `tfl.pwl_calibration_lib.BoundConstraintsType` enum which + corresponds to `value` and `clamp_to_value`. + """ + if value is None: + return 0.0, BoundConstraintsType.NONE + else: + value = float(value) + if clamp_to_value: + return value, BoundConstraintsType.CLAMPED + else: + return value, BoundConstraintsType.BOUND + + +def compute_interpolation_weights(inputs, keypoints, lengths): + """Computes weights for PWL calibration. + + Args: + inputs: Tensor of shape: `(D0, D1, ..., DN, 1)` which represents inputs to + to the pwl function. A typical shape is: `(batch_size, 1)`. + keypoints: Rank-1 tensor of shape `(num_keypoints - 1)` which represents + left keypoint of pieces of piecewise linear function along X axis. + lengths: Rank-1 tensor of shape `(num_keypoints - 1)` which represents + lengths of pieces of piecewise linear function along X axis. + + Returns: + Interpolation weights tensor of shape: `(D0, D1, ..., DN, num_keypoints)`. 
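+
+  For example, input keypoints `[0.0, 0.5, 1.0]` correspond to
+  `keypoints=[0.0, 0.5]` and `lengths=[0.5, 0.5]`; for an input of `0.75` the
+  result is `[1.0, 1.0, 0.5]`: the constant bias weight followed by the
+  per-piece interpolation weights.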
+ """ + weights = (inputs - keypoints) / lengths + weights = tf.minimum(weights, 1.0) + weights = tf.maximum(weights, 0.0) + # Prepend 1.0 at the beginning to add bias unconditionally. + return tf.concat([tf.ones_like(inputs), weights], axis=-1) + + +def linear_initializer(shape, + output_min, + output_max, + monotonicity, + keypoints=None, + dtype=None): + """Initializes PWL calibration layer to represent linear function. + + PWL calibration layer weights have shape `(knum_keypoints, units)`. First row + represents bias. All remaining represent delta in y-value compare to previous + point. Aka heights of segments. + + Args: + shape: Requested shape. Must be `(num_keypoints, units)`. + output_min: Minimum value of PWL calibration output after initialization. + output_max: Maximum value of PWL calibration output after initialization. + monotonicity: If one of {0, 1}, the returned function will go from + `(input_min, output_min)` to `(input_max, output_max)`. If set to -1, the + returned function will go from `(input_min, output_max)` to `(input_max, + output_min)`. + keypoints: If not provided (None or []), all pieces of returned function + will have equal heights (i.e. `y[i+1] - y[i]` is constant). If provided, + all pieces of returned function will have equal slopes (i.e. `(y[i+1] - + y[i]) / (x[i+1] - x[i])` is constant). + dtype: dtype. + + Returns: + PWLCalibration layer weights initialized according to params. + + Raises: + ValueError: If given parameters are inconsistent. + """ + verify_hyperparameters( + input_keypoints=keypoints, + output_min=output_min, + output_max=output_max, + monotonicity=monotonicity, + weights_shape=shape) + + num_keypoints, units = int(shape[0]), int(shape[1]) + if keypoints is None: + # Subtract 1 for bias which will be handled separately. + num_pieces = num_keypoints - 1 + segment_height = (output_max - output_min) / num_pieces + heights_tensor = tf.constant( + [segment_height] * num_pieces, shape=[num_pieces, 1], dtype=dtype) + else: + keypoints_tensor = tf.constant( + keypoints, shape=[num_keypoints, 1], dtype=dtype) + lengths_tensor = keypoints_tensor[1:] - keypoints_tensor[0:-1] + output_range = output_max - output_min + heights_tensor = ( + lengths_tensor * (output_range / tf.reduce_sum(lengths_tensor))) + + if units > 1: + heights_tensor = tf.tile(heights_tensor, multiples=[1, units]) + + if monotonicity == -1: + bias = output_max + heights_tensor = -heights_tensor + else: + bias = output_min + bias_tensor = tf.constant(bias, shape=[1, units], dtype=dtype) + + return tf.concat([bias_tensor, heights_tensor], axis=0) + + +def _approximately_project_bounds_only(bias, heights, output_min, output_max, + output_min_constraints, + output_max_constraints): + """Bounds constraints implementation for PWL calibration layer. + + Maps given weights of PWL calibration layer into some point which satisfies + given bounds by capping the function based on the bounds. This is not an exact + projection in L2 norm, but it is sufficiently accurate and efficient in + practice for non monotonic functions. + + Args: + bias: `(1, units)`-shape tensor which represents bias. + heights: `(num_heights, units)`-shape tensor which represents heights. + output_min: Minimum possible output of pwl function. + output_max: Maximum possible output of pwl function. + output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType` + describing the constraints on the layer's minimum value. 
+    output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
+      describing the constraints on the layer's maximum value.
+
+  Raises:
+    ValueError: If `output_min(max)_constraints` is set to "CLAMPED" which is
+      not supported.
+
+  Returns:
+    Projected bias and heights.
+  """
+  if (output_min_constraints == BoundConstraintsType.CLAMPED or
+      output_max_constraints == BoundConstraintsType.CLAMPED):
+    raise ValueError("Clamping is not implemented for non monotonic functions.")
+  if (output_min_constraints == BoundConstraintsType.NONE and
+      output_max_constraints == BoundConstraintsType.NONE):
+    return bias, heights
+
+  # Compute cumulative sums - they correspond to our calibrator outputs at
+  # keypoints. Simply clip them according to config and compute new heights
+  # using clipped cumulative sums.
+  sums = tf.cumsum(tf.concat([bias, heights], axis=0))
+  if output_min_constraints == BoundConstraintsType.BOUND:
+    sums = tf.maximum(sums, output_min)
+  if output_max_constraints == BoundConstraintsType.BOUND:
+    sums = tf.minimum(sums, output_max)
+
+  bias = sums[0:1]
+  heights = sums[1:] - sums[:-1]
+  return bias, heights
+
+
+def _project_bounds_considering_monotonicity(bias, heights, monotonicity,
+                                             output_min, output_max,
+                                             output_min_constraints,
+                                             output_max_constraints):
+  """Bounds projection given monotonicity constraints.
+
+  Projects weights of a PWLCalibration layer onto the nearest point, in terms
+  of L2 distance, which satisfies the bounds constraints, taking into account
+  that the function is monotonic.
+
+  Algorithm:
+  To minimize the L2 distance to the projected point we want to distribute the
+  update through the heights as evenly as possible. A simplified description of
+  the algorithm for an increasing function is:
+
+  ```
+  delta = (output_max - (bias + sum(heights[:]))) / (num_heights + 1)
+  bias = max(bias + delta, output_min)
+  heights[:] += delta
+  ```
+
+  Some details which were omitted above:
+  * If `output_min_constraints == "CLAMPED"` then the `bias` variable becomes
+    constant (this means we can't add delta to it).
+  * If `output_max_constraints != "CLAMPED"` we are looking only for negative
+    delta because we are not required to stretch the function to meet the
+    upper bound.
+  * If the function is decreasing we multiply everything by -1 and switch min
+    and max to make it increasing.
+
+  Args:
+    bias: `(1, units)`-shape tensor which represents bias.
+    heights: `(num_heights, units)`-shape tensor which represents heights.
+    monotonicity: 1 for increasing, -1 for decreasing.
+    output_min: Lower bound constraint of PWL calibration layer.
+    output_max: Upper bound constraint of PWL calibration layer.
+    output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
+      describing the constraints on the layer's minimum value.
+    output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
+      describing the constraints on the layer's maximum value.
+
+  Returns:
+    Projected bias and heights tensors.
+
+  Raises:
+    ValueError: If monotonicity is not in: {-1, 1}.
+  """
+  if monotonicity not in [-1, 1]:
+    raise ValueError("Monotonicity should be one of: [-1, 1]. It is: " +
+                     str(monotonicity))
+  if monotonicity == -1:
+    # Reduce computation of projection of decreasing function to computation of
+    # projection of increasing function by multiplying everything by -1 and
+    # swapping maximums and minimums.
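+    # E.g. projecting a decreasing function onto [output_min, output_max] is
+    # equivalent to projecting its negation (an increasing function) onto
+    # [-output_max, -output_min] and negating the result back.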
+ (projected_bias, + projected_heights) = _project_bounds_considering_monotonicity( + bias=-bias, + heights=-heights, + monotonicity=1, + output_min=None if output_max is None else -output_max, + output_max=None if output_min is None else -output_min, + output_min_constraints=output_max_constraints, + output_max_constraints=output_min_constraints) + return -projected_bias, -projected_heights + + bct = BoundConstraintsType + if output_max_constraints != bct.NONE: + num_heights = float(heights.shape.dims[0].value) + sum_heights = tf.reduce_sum(heights, axis=0) + + # For each possible output_min_constraints value compute projected bias and + # heights_delta. + if output_min_constraints == bct.CLAMPED: + # If output_min is clamped - bias must have fixed value and number of free + # parameters is equal to number of heights. + bias = tf.constant(output_min, shape=bias.shape, dtype=bias.dtype) + heights_delta = (output_max - (bias + sum_heights)) / num_heights + elif output_min_constraints == bct.BOUND: + # If output_min is not clamped then number of free parameters is + # num_heights + 1. + bias_delta = (output_max - (bias + sum_heights)) / (num_heights + 1) + if output_max_constraints != bct.CLAMPED: + # If output_max is not clamped - there is no need to stretch our + # function. We need only to squeeze it. + bias_delta = tf.minimum(bias_delta, 0.0) + bias = tf.maximum(bias + bias_delta, output_min) + # For this branch compute heights delta _after_ we applied bias projection + # because heights are not bound by output_min constraint unlike bias. + heights_delta = (output_max - (bias + sum_heights)) / num_heights + else: + bias_delta = (output_max - (bias + sum_heights)) / (num_heights + 1) + # For this branch heights delta and bias delta are same because none of + # them are bounded from below. + heights_delta = bias_delta + if output_max_constraints != bct.CLAMPED: + # If output_max is not clamped - there is no need to stretch our + # function. We need only to squeeze it. + bias_delta = tf.minimum(bias_delta, 0.0) + bias += bias_delta + + if output_max_constraints != bct.CLAMPED: + # If output_max is not clamped - there is no need to stretch our function. + # We need only to squeeze it. + heights_delta = tf.minimum(heights_delta, 0.0) + heights += heights_delta + else: + # No need to do anything with heights if there are no output_max + # constraints. + if output_min_constraints == bct.CLAMPED: + bias = tf.constant(output_min, shape=bias.shape, dtype=bias.dtype) + elif output_min_constraints == bct.BOUND: + bias = tf.maximum(bias, output_min) + + return bias, heights + + +def _project_convexity(heights, lengths, convexity, constraint_group): + """Convexity projection for given 'constraint_group'. + + Since an exact single step projection is not possible for convexity + constraints, we break the constraints into two independent groups and apply + Dykstra's alternating projections algorithm. Each group consists of a list of + pairs where each pair represents constraints on 2 consequtive heights. + + Groups: + + ``` + g0 = [(h0, h1), (h2, h3), (h4, h5), ...] + g1 = [(h1, h2), (h3, h4), (h5, h6), ...] + ``` + + We know how to project single pair of adjacent heights: + h0_prime = min/max(h0, (l0 / (l0 + l1)) * (h0 + h1)) + h1_prime = min/max(h1, (l1 / (l0 + l1)) * (h0 + h1)) + where l0 and l1 stand for lengths of segment which correspond to h0 and h1 and + choise of min or max functions depends on convexity direction. 
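+
+  For example, with `convexity == 1` (convex) and equal lengths `l0 == l1`,
+  the update caps `h0` at `(h0 + h1) / 2` and raises `h1` to at least
+  `(h0 + h1) / 2`, so the projected pair satisfies `h1 >= h0`, i.e.
+  non-decreasing slopes.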
+ + We can see that all pairs within same group are independent so we know how to + project such group of constraints in single pass. + + This function breaks heights and their lengths into given constraint group + and does projection for this group. + + Args: + heights: `(num_heights, units)`-shape tensor which represents heights. + lengths: `(num_heights)`-shape tensor which represents lengths of segments + which correspond to heights. + convexity: -1 or 1 where 1 stands for convex function and -1 for concave. + constraint_group: 0 or 1 which represent group from description above. + + Returns: + Projected heights for given constraint group. + """ + verify_hyperparameters( + convexity=convexity, + lengths=lengths, + weights_shape=[heights.shape[0] + 1, heights.shape[1]]) + if constraint_group not in [0, 1]: + raise ValueError("constraint_group must be one of: [0, 1]. " + "Given: %s" % constraint_group) + + if convexity == 0 or heights.shape[0] == 1: + return heights + + num_heights = heights.shape.dims[0].value + # To avoid broadcasting when performing math ops with 'heights'. + lengths = tf.reshape(lengths, shape=(-1, 1)) + + # Split heigths and lengths into pairs which correspond to given constraint + # group. In order to do this we need to split heights into odd and even. We + # can possibly omit last element of larger set to ensure that both sets have + # same number of elements. + num_0 = (num_heights - constraint_group + 1) // 2 + num_1 = (num_heights - constraint_group) // 2 + if num_1 == num_0: + last_index = None + else: + last_index = -1 + heights_0 = heights[constraint_group:last_index:2] + lengths_0 = lengths[constraint_group:last_index:2] + heights_1 = heights[constraint_group + 1::2] + lengths_1 = lengths[constraint_group + 1::2] + + # h0_prime = (l0 / (l0 + l1)) * (h0 + h1) = l0 * base + # h1_prime = (l1 / (l0 + l1)) * (h0 + h1) = l1 * base + base = (heights_0 + heights_1) / (lengths_0 + lengths_1) + heights_0_prime = lengths_0 * base + heights_1_prime = lengths_1 * base + if convexity == 1: + heights_0 = tf.minimum(heights_0, heights_0_prime) + heights_1 = tf.maximum(heights_1, heights_1_prime) + else: + heights_0 = tf.maximum(heights_0, heights_0_prime) + heights_1 = tf.minimum(heights_1, heights_1_prime) + + # Now we need to merge heights in such way that elements from 'heights_0' and + # 'heights_1' alternate: + # merged = [heights_0[0], heights_1[0], heights_0[1], heights_1[1], ...] + # Achieve this by concatenating along axis=1 so after concatenation elements + # from 'heights_0' and 'heights_1' will alternate in memory and reshape will + # give us desired result. + projected_heights = tf.reshape( + tf.concat([heights_0, heights_1], axis=1), shape=[-1, heights.shape[1]]) + + weights_pieces = [projected_heights] + if constraint_group == 1: + # First height was skipped during initial split. + weights_pieces = [heights[0:1]] + weights_pieces + if last_index == -1: + # Last height was skipped during initial split. 
+    weights_pieces.append(heights[-1:])
+
+  if len(weights_pieces) == 1:
+    return weights_pieces[0]
+  else:
+    return tf.concat(weights_pieces, axis=0)
+
+
+def _project_monotonicity(heights, monotonicity):
+  """Projects heights into a monotonic function."""
+  if monotonicity == 0:
+    return heights
+  elif monotonicity == 1:
+    return tf.maximum(heights, 0.0)
+  else:
+    return tf.minimum(heights, 0.0)
+
+
+def project_all_constraints(weights,
+                            monotonicity,
+                            output_min,
+                            output_max,
+                            output_min_constraints,
+                            output_max_constraints,
+                            convexity,
+                            lengths,
+                            num_projection_iterations=8):
+  """Jointly projects into all supported constraints.
+
+  For all combinations of constraints except the case where bounds constraints
+  are specified without monotonicity constraints, we properly project onto the
+  nearest point with respect to the L2 norm. For the latter case we use a
+  heuristic to map the input point to some feasible point, with no guarantees
+  on how close this point is to the true projection.
+
+  If only bounds or only monotonicity constraints are specified, a single
+  projection step is performed. For all other combinations of constraints we
+  use num_projection_iterations iterations of Dykstra's alternating projection
+  algorithm to jointly project onto all the given constraints. Dykstra's
+  algorithm gives us a proper projection with respect to the L2 norm but
+  approaches it from the "wrong" side. That's why, in order to ensure that
+  constraints are strictly met, we do approximate projections at the end which
+  project strictly into the feasible space, but are not exact projections with
+  respect to the L2 norm. With enough iterations of Dykstra's algorithm, the
+  impact of such an approximate projection should be negligible.
+
+  With bound and convexity constraints and no specified monotonicity, this
+  method does not fully satisfy the constraints. Increasing the number of
+  iterations can reduce the constraint violation in such cases.
+
+  Args:
+    weights: `(num_keypoints, units)`-shape tensor which represents weights of
+      PWL calibration layer.
+    monotonicity: 1 for increasing, -1 for decreasing, 0 for no monotonicity
+      constraints.
+    output_min: Lower bound constraint of PWL calibration layer.
+    output_max: Upper bound constraint of PWL calibration layer.
+    output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
+      describing the constraints on the layer's minimum value.
+    output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
+      describing the constraints on the layer's maximum value.
+    convexity: 1 for convex, -1 for concave, 0 for no convexity constraints.
+    lengths: Lengths of pieces of piecewise linear function. Needed only if
+      convexity projection is specified.
+    num_projection_iterations: Number of iterations of Dykstra's alternating
+      projection algorithm.
+
+  Returns:
+    Projected weights tensor.
+  """
+  bias = weights[0:1]
+  heights = weights[1:]
+
+  def body(projection_counter, bias, heights, last_bias_change,
+           last_heights_change):
+    """The body of tf.while_loop implementing a step of Dykstra's projection.
+
+    Args:
+      projection_counter: The counter tensor or number at the beginning of the
+        iteration.
+      bias: Bias tensor at the beginning of the iteration.
+      heights: Heights tensor at the beginning of the iteration.
+      last_bias_change: Dict that stores the last change in the bias after
+        projecting onto each subset of constraints.
+      last_heights_change: Dict that stores the last change in the heights
+        after projecting onto each subset of constraints.
+ + Returns: + The tuple `(num_projection_counter, bias, heights, last_bias_change, + last_heights_change)` at the end of the iteration. + """ + last_bias_change = copy.copy(last_bias_change) + last_heights_change = copy.copy(last_heights_change) + num_projections = 0 + # ******************** BOUNDS ********************* + bct = BoundConstraintsType + if output_min_constraints != bct.NONE or output_max_constraints != bct.NONE: + rolled_back_bias = bias - last_bias_change["BOUNDS"] + rolled_back_heights = heights - last_heights_change["BOUNDS"] + if monotonicity != 0: + bias, heights = _project_bounds_considering_monotonicity( + bias=rolled_back_bias, + heights=rolled_back_heights, + monotonicity=monotonicity, + output_min=output_min, + output_max=output_max, + output_min_constraints=output_min_constraints, + output_max_constraints=output_max_constraints) + else: + bias, heights = _approximately_project_bounds_only( + bias=rolled_back_bias, + heights=rolled_back_heights, + output_min=output_min, + output_max=output_max, + output_min_constraints=output_min_constraints, + output_max_constraints=output_max_constraints) + last_bias_change["BOUNDS"] = bias - rolled_back_bias + last_heights_change["BOUNDS"] = heights - rolled_back_heights + num_projections += 1 + + # ******************** MONOTONICITY ********************* + if monotonicity != 0: + rolled_back_heights = heights - last_heights_change["MONOTONICITY"] + heights = _project_monotonicity( + heights=rolled_back_heights, monotonicity=monotonicity) + last_heights_change["MONOTONICITY"] = heights - rolled_back_heights + num_projections += 1 + + # ******************** CONVEXITY ********************* + if convexity != 0: + if heights.shape[0] >= 2: + rolled_back_heights = heights - last_heights_change["CONVEXITY_0"] + heights = _project_convexity( + heights=rolled_back_heights, + lengths=lengths, + convexity=convexity, + constraint_group=0) + last_heights_change["CONVEXITY_0"] = heights - rolled_back_heights + num_projections += 1 + if heights.shape[0] >= 3: + rolled_back_heights = heights - last_heights_change["CONVEXITY_1"] + heights = _project_convexity( + heights=rolled_back_heights, + lengths=lengths, + convexity=convexity, + constraint_group=1) + last_heights_change["CONVEXITY_1"] = heights - rolled_back_heights + num_projections += 1 + + return (projection_counter + num_projections, bias, heights, + last_bias_change, last_heights_change) + + # Call the body of the loop once to see if Dykstra's is needed. + # If there is only one set of projections, apply it without a loop. + # Running the body of the loop also finds the required last_bias_change + # and last_heights_change keys. The set of keys in the input and output of the + # body of tf.while_loop must be the same across iterations. + zero_bias = tf.zeros_like(bias) + zero_heights = tf.zeros_like(heights) + last_bias_change = collections.defaultdict(lambda: zero_bias) + last_heights_change = collections.defaultdict(lambda: zero_heights) + (num_projections, projected_bias, projected_heights, last_bias_change, + last_heights_change) = body(0, bias, heights, last_bias_change, + last_heights_change) + if num_projections <= 1: + return tf.concat([projected_bias, projected_heights], axis=0) + + def cond(projection_counter, bias, heights, last_bias_change, + last_heights_change): + del bias, heights, last_bias_change, last_heights_change + return tf.less(projection_counter, + num_projection_iterations * num_projections) + + # Apply Dykstra's algorithm with tf.while_loop. 
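+  # Schematically, each pass of `body` applies the classic Dykstra update to
+  # every active constraint set C:
+  #
+  #   rolled_back = x - last_change[C]
+  #   x = project_onto_C(rolled_back)
+  #   last_change[C] = x - rolled_back
+  #
+  # Subtracting the previously recorded correction before re-projecting is
+  # what makes the iterations approach the joint L2 projection rather than
+  # merely some feasible point, which is all plain alternating projections
+  # would guarantee.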
+  projection_counter = tf.constant(0)
+  last_bias_change = {k: zero_bias for k in last_bias_change}
+  last_heights_change = {k: zero_heights for k in last_heights_change}
+  (_, bias, heights, _,
+   _) = tf.while_loop(cond, body, (projection_counter, bias, heights,
+                                   last_bias_change, last_heights_change))
+
+  # Dykstra's algorithm is iterative, so to strictly meet the constraints we
+  # finish with an approximate projection algorithm.
+  return _finalize_constraints(
+      bias=bias,
+      heights=heights,
+      monotonicity=monotonicity,
+      output_min=output_min,
+      output_max=output_max,
+      output_min_constraints=output_min_constraints,
+      output_max_constraints=output_max_constraints,
+      convexity=convexity,
+      lengths=lengths)
+
+
+def _squeeze_by_scaling(bias, heights, monotonicity, output_min, output_max,
+                        output_min_constraints, output_max_constraints):
+  """Squeezes monotonic calibrators by scaling in order to meet bounds.
+
+  Projection by scaling is not exact with respect to the L2 norm, but it
+  maintains convexity, unlike projection by shift.
+
+  Args:
+    bias: `(1, units)`-shape tensor which represents bias.
+    heights: `(num_heights, units)`-shape tensor which represents heights.
+    monotonicity: 1 for increasing, -1 for decreasing.
+    output_min: Lower bound constraint of PWL calibration layer.
+    output_max: Upper bound constraint of PWL calibration layer.
+    output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
+      describing the constraints on the layer's minimum value.
+    output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType`
+      describing the constraints on the layer's maximum value.
+
+  Returns:
+    Projected bias and heights.
+  """
+  if monotonicity == -1:
+    if output_min_constraints == BoundConstraintsType.NONE:
+      return bias, heights
+    # Reduce the projection of a decreasing function to the projection of an
+    # increasing function by multiplying everything by -1 and swapping
+    # maximums and minimums.
+    bias, heights = _squeeze_by_scaling(
+        bias=-bias,
+        heights=-heights,
+        monotonicity=1,
+        output_min=None if output_max is None else -output_max,
+        output_max=None if output_min is None else -output_min,
+        output_min_constraints=output_max_constraints,
+        output_max_constraints=output_min_constraints)
+    return -bias, -heights
+  if output_max_constraints == BoundConstraintsType.NONE:
+    return bias, heights
+
+  delta = output_max - bias
+  # For better stability use tf.where rather than the more standard approach:
+  #   heights *= tf.reduce_sum(heights) / max(delta, eps)
+  # in order to keep everything strictly unchanged for small deltas, rather
+  # than scaling heights by a factor of 1/eps and still failing to meet the
+  # constraints.
+  scaling_factor = tf.where(delta > 0.001,
+                            tf.reduce_sum(heights, axis=0) / delta,
+                            tf.ones_like(delta))
+  heights = heights / tf.maximum(scaling_factor, 1.0)
+  return bias, heights
+
+
+def _approximately_project_convexity(heights, lengths, convexity):
+  """Strictly projects convexity, but is not exact with respect to the L2 norm.
+
+  Projects by iterating over the pieces of the piecewise linear function from
+  left to right and aligning the current slope with the previous one whenever
+  convexity is violated. For a convex function the slope of piece i must be at
+  least the slope of piece i - 1, i.e.
+  heights[i] / lengths[i] >= heights[i - 1] / lengths[i - 1], which the update
+  below enforces with equality when violated.
+
+  Args:
+    heights: `(num_heights, units)`-shape tensor which represents heights.
+    lengths: `(num_heights)`-shape tensor which represents lengths of segments
+      which correspond to heights.
+    convexity: -1 or 1 where 1 stands for convex function and -1 for concave.
+
+  Returns:
+    Projected heights.
+ """ + if convexity == 0: + return heights + heights = tf.unstack(heights, axis=0) + lengths = tf.unstack(lengths, axis=0) + for i in range(1, len(heights)): + temp = heights[i - 1] * (lengths[i] / lengths[i - 1]) + if convexity == 1: + heights[i] = tf.maximum(heights[i], temp) + else: + heights[i] = tf.minimum(heights[i], temp) + + return tf.stack(heights, axis=0) + + +def _finalize_constraints(bias, heights, monotonicity, output_min, output_max, + output_min_constraints, output_max_constraints, + convexity, lengths): + """Strictly projects onto the given constraint, approximate w.r.t the L2 norm. + + Dykstra's algorithm gives us proper projection with respect to L2 norm but + approaches it from "wrong" side. In order to ensure that constraints are + strictly met we'll do approximate projections in the end which project + strictly into feasible space, but it's not an exact projection with respect to + the L2 norm. With enough iterations of the Dykstra's algorithm, the impact of + such approximate projection should be negligible. + + With bound and convexity constraints and no specified monotonicity, this + method does not fully satisfy the constrains. Increasing the number of + iterations can reduce the constraint violation in such cases. Fortunately it + does not seem to be common config. + + Args: + bias: `(1, units)`-shape tensor which represents bias. + heights: `(num_heights, units)`-shape tensor which represents heights. + monotonicity: 1 for increasing, -1 for decreasing, 0 for no monotonicity + constraints. + output_min: Lower bound constraint of PWL calibration layer. + output_max: Upper bound constraint of PWL calibration layer. + output_min_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType` + describing the constraints on the layer's minimum value. + output_max_constraints: A `tfl.pwl_calibration_lib.BoundConstraintsType` + describing the constraints on the layer's maximum value. + convexity: 1 for convex, -1 for concave, 0 for no convexity constraints. + lengths: Lengths of pieces of piecewise linear function. Needed only if + convexity projection is specified. + + Returns: + Projected weights tensor. + """ + # Convexity and monotonicity projections don't violate each other, but both + # might lead to bounds violation, so do them first and fix bounds after. + if monotonicity != 0: + heights = _project_monotonicity(heights=heights, monotonicity=monotonicity) + if convexity != 0: + heights = _approximately_project_convexity( + heights=heights, lengths=lengths, convexity=convexity) + + bct = BoundConstraintsType + if output_min_constraints != bct.NONE or output_max_constraints != bct.NONE: + if monotonicity != 0 and convexity != 0: + # Both monotonicity and convexity projection can only increase upper bound + # so we only need to take care of decreasing it back. + bias, heights = _squeeze_by_scaling( + bias=bias, + heights=heights, + monotonicity=monotonicity, + output_min=output_min, + output_max=output_max, + output_min_constraints=output_min_constraints, + output_max_constraints=output_max_constraints) + else: + # This bounds projection might violate convexity. Unfortunately bounds + # projections with convexity and without monotonicity are are difficult to + # achieve strictly and might be violated. so ignore this for now. In order + # to minimize projection error consider increasing + # num_projection_iterations. 
+      if output_min_constraints == bct.CLAMPED:
+        output_min_constraints = bct.BOUND
+      if output_max_constraints == bct.CLAMPED:
+        output_max_constraints = bct.BOUND
+      bias, heights = _approximately_project_bounds_only(
+          bias=bias,
+          heights=heights,
+          output_min=output_min,
+          output_max=output_max,
+          output_min_constraints=output_min_constraints,
+          output_max_constraints=output_max_constraints)
+  return tf.concat([bias, heights], axis=0)
+
+
+def assert_constraints(outputs,
+                       monotonicity,
+                       output_min,
+                       output_max,
+                       clamp_min=False,
+                       clamp_max=False,
+                       debug_tensors=None,
+                       eps=1e-6):
+  """Asserts that 'outputs' satisfy constraints.
+
+  Args:
+    outputs: Tensor of shape `(num_output_values, units)` which represents the
+      outputs of the PWL calibration layer to be tested against the given
+      constraints. If monotonicity is specified, these outputs must correspond
+      to consecutive inputs.
+    monotonicity: One of {-1, 0, 1}. -1 for decreasing, 1 for increasing, 0
+      means no monotonicity checks.
+    output_min: Lower bound or None.
+    output_max: Upper bound or None.
+    clamp_min: Whether one of the outputs must match output_min.
+    clamp_max: Whether one of the outputs must match output_max.
+    debug_tensors: None or list of anything convertible to tensor (for example
+      tensors or strings) which will be printed in case of constraints
+      violation.
+    eps: Allowed constraint violation.
+
+  Raises:
+    ValueError: If monotonicity is not one of {-1, 0, 1}.
+
+  Returns:
+    List of assertion ops in graph mode, or immediately asserts in eager mode.
+  """
+
+  info = ["Outputs: ", outputs, "Epsilon: ", eps]
+  if debug_tensors:
+    info += debug_tensors
+  asserts = []
+
+  if output_min is not None:
+    min_output = tf.reduce_min(outputs, axis=0)
+    if clamp_min:
+      asserts.append(
+          tf.Assert(
+              tf.reduce_all(tf.abs(min_output - output_min) <= eps),
+              data=["Clamp_min violation.", "output_min:", output_min] + info,
+              summarize=outputs.shape[0]))
+    else:
+      asserts.append(
+          tf.Assert(
+              tf.reduce_all(min_output >= output_min - eps),
+              data=["Lower bound violation.", "output_min:", output_min] + info,
+              summarize=outputs.shape[0]))
+
+  if output_max is not None:
+    max_output = tf.reduce_max(outputs, axis=0)
+    if clamp_max:
+      asserts.append(
+          tf.Assert(
+              tf.reduce_all(tf.abs(max_output - output_max) <= eps),
+              data=["Clamp_max violation.", "output_max:", output_max] + info,
+              summarize=outputs.shape[0]))
+    else:
+      asserts.append(
+          tf.Assert(
+              tf.reduce_all(max_output <= output_max + eps),
+              data=["Upper bound violation.", "output_max:", output_max] + info,
+              summarize=outputs.shape[0]))
+
+  if monotonicity not in [-1, 0, 1]:
+    raise ValueError("'monotonicity' must be one of: [-1, 0, 1]. It is: %s" %
+                     monotonicity)
+  if monotonicity != 0:
+    diffs = (outputs[1:] - outputs[0:-1])
+    asserts.append(
+        tf.Assert(
+            tf.reduce_min(diffs * monotonicity) >= -eps,
+            data=["Monotonicity violation.", "monotonicity:", monotonicity] +
+            info,
+            summarize=outputs.shape[0]))
+
+  return asserts
+
+
+def verify_hyperparameters(input_keypoints=None,
+                           output_min=None,
+                           output_max=None,
+                           monotonicity=None,
+                           convexity=None,
+                           is_cyclic=False,
+                           lengths=None,
+                           weights_shape=None):
+  """Verifies that all given hyperparameters are consistent.
+
+  See the PWLCalibration class-level comment for a detailed description of the
+  arguments.
+
+  Args:
+    input_keypoints: `input_keypoints` of PWLCalibration layer.
+    output_min: Smallest output of PWLCalibration layer.
+    output_max: Largest output of PWLCalibration layer.
+ monotonicity: `monotonicity` hyperparameter of PWLCalibration layer. + convexity: `convexity` hyperparameter of PWLCalibration layer. + is_cyclic: `is_cyclic` hyperparameter of PWLCalibration layer. + lengths: Lengths of pieces of piecewise linear function. + weights_shape: Shape of weights of PWLCalibration layer. + + Raises: + ValueError: If something is inconsistent. + """ + if input_keypoints is not None: + if tf.is_tensor(input_keypoints): + if len(input_keypoints.shape) != 1 or input_keypoints.shape[0] < 2: + raise ValueError("Input keypoints must be rank-1 tensor of size at " + "least 2. It is: " + str(input_keypoints)) + else: + if len(input_keypoints) < 2: + raise ValueError("At least 2 input keypoints must be provided. " + "Given: " + str(input_keypoints)) + if not all(input_keypoints[i] < input_keypoints[i + 1] + for i in range(len(input_keypoints) - 1)): + raise ValueError("Keypoints must be strictly increasing. They are: " + + str(input_keypoints)) + + if output_min is not None and output_max is not None: + if output_max < output_min: + raise ValueError("If specified output_max must be greater than " + "output_min. " + "They are: ({}, {})".format(output_min, output_max)) + + # It also raises errors if monotonicities specified incorrectly. + monotonicity = canonicalize_monotonicity(monotonicity) + convexity = canonicalize_convexity(convexity) + + if is_cyclic and (monotonicity or convexity): + raise ValueError("'is_cyclic' can not be specified together with " + "'monotonicity'({}) or 'convexity'({}).".format( + monotonicity, convexity)) + + if weights_shape is not None: + if len(weights_shape) != 2 or weights_shape[0] < 2: + raise ValueError("PWLCalibrator weights must have shape: [k, units] where" + " k > 1. It is: " + str(weights_shape)) + + if lengths is not None and weights_shape is not None: + if tf.is_tensor(lengths): + num_lengths = lengths.shape[0] + else: + num_lengths = len(lengths) + if num_lengths + 1 != weights_shape[0]: + raise ValueError("Number of lengths must be equal to number of weights " + "minus one. Lengths: %s, weights_shape: %s" % + (lengths, weights_shape)) + + +def canonicalize_monotonicity(monotonicity): + """Converts string constants representing monotonicity into integers. + + Args: + monotonicity: monotonicity hyperparameter of `PWLCalibration` layer. + + Raises: + ValueError if monotonicity is invalid. + + Returns: + monotonicity represented as -1, 0 or 1. + """ + if monotonicity is None: + return None + + if monotonicity in [-1, 0, 1]: + return monotonicity + elif isinstance(monotonicity, six.string_types): + if monotonicity.lower() == "decreasing": + return -1 + if monotonicity.lower() == "none": + return 0 + if monotonicity.lower() == "increasing": + return 1 + raise ValueError("'monotonicities' must be from: [-1, 0, 1, 'decreasing', " + "'none', 'increasing']. Given: %s" % monotonicity) + + +def canonicalize_convexity(convexity): + """Converts string constants representing convexity into integers. + + Args: + convexity: convexity hyperparameter of `PWLCalibration` layer. + + Raises: + ValueError if convexity is invalid. + + Returns: + convexity represented as -1, 0 or 1. + """ + if convexity is None: + return None + + if convexity in [-1, 0, 1]: + return convexity + elif isinstance(convexity, six.string_types): + if convexity.lower() == "concave": + return -1 + if convexity.lower() == "none": + return 0 + if convexity.lower() == "convex": + return 1 + raise ValueError("'convexity' must be from: [-1, 0, 1, 'concave', " + "'none', 'convex']. 
Given: %s" % convexity) diff --git a/tensorflow_lattice/python/pwl_calibration_test.py b/tensorflow_lattice/python/pwl_calibration_test.py new file mode 100644 index 0000000..3fb7939 --- /dev/null +++ b/tensorflow_lattice/python/pwl_calibration_test.py @@ -0,0 +1,1119 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tests for PWL calibration layer. + +This test should be run with "-c opt" since otherwise it's slow. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +from absl import logging +from absl.testing import parameterized +import numpy as np +import tensorflow as tf +from tensorflow import keras +from tensorflow_lattice.python import parallel_combination_layer as parallel_combination +from tensorflow_lattice.python import pwl_calibration_layer as pwl_calibraion +from tensorflow_lattice.python import pwl_calibration_lib as pwl_lib +from tensorflow_lattice.python import test_utils + + +class CalibrateWithSeparateMissing(tf.keras.layers.Layer): + """Create separate is_missing tensor. + + Splits input tensor into list: [input_tensor, is_missing_tensor] and passes + this list as input to given calibration layer. + """ + + def __init__(self, calibration_layer, missing_input_value): + super(CalibrateWithSeparateMissing, self).__init__() + self.calibration_layer = calibration_layer + self.missing_input_value = missing_input_value + + def call(self, x): + is_missing = tf.cast(tf.equal(x, self.missing_input_value), + dtype=tf.float32) + return self.calibration_layer([x, is_missing]) + + +class PwlCalibrationLayerTest(parameterized.TestCase, tf.test.TestCase): + + def setUp(self): + self._disable_all = False + self._loss_eps = 0.0001 + self._small_eps = 1e-6 + super(PwlCalibrationLayerTest, self).setUp() + + def _ResetAllBackends(self): + keras.backend.clear_session() + tf.compat.v1.reset_default_graph() + + def _ScatterXUniformly(self, units, num_points, input_min, input_max, + missing_probability, missing_input_value): + """Randomly uniformly scatters points across input space.""" + np.random.seed(41) + x = [ + input_min + np.random.random(units) * (input_max - input_min) + for _ in range(num_points) + ] + if missing_probability > 0.0: + is_missings = np.random.random([num_points, units]) < missing_probability + x = [ + is_missing * missing_input_value + (1. 
- is_missing) * point + for point, is_missing in zip(x, is_missings) + ] + x.sort(key=np.sum) + return x + + def _ScatterXUniformlyIncludeBounds(self, units, **kwargs): + """Same as _ScatterXUniformly() but includes bounds.""" + x = self._ScatterXUniformly(units, **kwargs) + x[0] = np.array([kwargs["input_min"]] * units) + x[-1] = np.array([kwargs["input_max"]] * units) + return x + + def _SmallWaves(self, x): + return np.mean( + np.power(x, 3) + 0.1 * np.sin(x * math.pi * 8), keepdims=True) + + def _SmallWavesPlusOne(self, x): + return self._SmallWaves(x) + 1.0 + + def _WavyParabola(self, x): + return np.mean( + np.power(x, 2) + 0.1 * np.sin(x * math.pi * 8) - 0.5, keepdims=True) + + def _SinCycle(self, x): + # Almost entire cycle of sin. + return np.mean(np.sin(x / 26.0 * (2.0 * math.pi)), keepdims=True) + + def _GenPWLFunction(self, input_keypoints, pwl_weights): + """Returns python function equivalent to PWL calibration layer. + + Output of returned function is equivalent ot output of PWL calibration layer + with keypoints being 'input_keypoints' and learned weights being + 'pwl_weights'. + + Args: + input_keypoints: list of keypoints of PWL calibration layer. + pwl_weights: list of weights of PWL calibration layer. + """ + + def Pwl(x): + result = pwl_weights[0] + for begin, end, weight in zip(input_keypoints[0:-1], input_keypoints[1:], + pwl_weights[1:]): + result += weight * np.maximum( + np.minimum((x - begin) / (end - begin), 1.0), 0.0) + return np.mean(result, keepdims=True) + + return Pwl + + def _SetDefaults(self, config): + config.setdefault("units", 1) + config.setdefault("use_multi_calibration_layer", False) + config.setdefault("one_d_input", False) + config.setdefault("use_separate_missing", False) + config.setdefault("output_min", None) + config.setdefault("output_max", None) + config.setdefault("missing_input_value", None) + config.setdefault("missing_output_value", None) + config.setdefault("monotonicity", 0) + config.setdefault("convexity", 0) + config.setdefault("is_cyclic", False) + config.setdefault("clamp_min", False) + config.setdefault("clamp_max", False) + config.setdefault("initializer", "equal_heights") + config.setdefault("kernel_regularizer", None) + config.setdefault("impute_missing", False) + config.setdefault("missing_probability", 0.0) + config.setdefault("num_projection_iterations", 8) + config.setdefault("constraint_assertion_eps", 1e-6) + config.setdefault("model_dir", "/tmp/test_pwl_model_dir/") + + if "input_keypoints" not in config: + # If "input_keypoints" are provided - other params referred by code below + # might be not available. + config.setdefault("input_keypoints", + np.linspace(start=config["input_min"], + stop=config["input_max"], + num=config["num_keypoints"])) + return config + + def _TrainModel(self, config, plot_path=None): + """Trains model and returns loss. + + Args: + config: Layer config internal for this test which specifies params of + piecewise linear layer to train. + plot_path: if specified - png file name to save visualization. See + test_utils.run_training_loop() for more details. + + Returns: + Training loss. + """ + logging.info("Testing config:") + logging.info(config) + if plot_path is not None and config["units"] > 1: + raise ValueError("Test config error. " + "Can not plot multi unit calibrators.") + config = self._SetDefaults(config) + + self._ResetAllBackends() + + # The input to the model can either be single or multi dimensional. 
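+    # If "one_d_input" is set, a single input column feeds a calibrator with
+    # `units` outputs (the layer presumably broadcasts that column across all
+    # units); otherwise each unit calibrates its own input column.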
+ input_units = 1 if config["one_d_input"] else config["units"] + + training_inputs = config["x_generator"]( + units=input_units, + num_points=config["num_training_records"], + input_min=config["input_keypoints"][0], + input_max=config["input_keypoints"][-1], + missing_probability=config["missing_probability"], + missing_input_value=config["missing_input_value"]) + training_labels = [config["y_function"](x) for x in training_inputs] + + # Either create multiple PWLCalibration layers and combine using a + # ParallelCombination layer, or create a single PWLCalibration with multiple + # output dimensions. + if config["use_multi_calibration_layer"]: + num_calibration_layers = config["units"] + pwl_calibration_units = 1 + else: + num_calibration_layers = 1 + pwl_calibration_units = config["units"] + + model = keras.models.Sequential() + model.add(tf.keras.layers.Input(shape=[input_units], dtype=tf.float32)) + calibration_layers = [] + for _ in range(num_calibration_layers): + calibration_layers.append( + pwl_calibraion.PWLCalibration( + units=pwl_calibration_units, + dtype=tf.float32, + input_keypoints=config["input_keypoints"], + output_min=config["output_min"], + output_max=config["output_max"], + clamp_min=config["clamp_min"], + clamp_max=config["clamp_max"], + monotonicity=config["monotonicity"], + convexity=config["convexity"], + is_cyclic=config["is_cyclic"], + kernel_initializer=config["initializer"], + kernel_regularizer=config["kernel_regularizer"], + impute_missing=config["impute_missing"], + missing_output_value=config["missing_output_value"], + missing_input_value=config["missing_input_value"], + num_projection_iterations=config["num_projection_iterations"])) + if len(calibration_layers) == 1: + if config["use_separate_missing"]: + model.add(CalibrateWithSeparateMissing( + calibration_layer=calibration_layers[0], + missing_input_value=config["missing_input_value"])) + else: + model.add(calibration_layers[0]) + else: + model.add(parallel_combination.ParallelCombination(calibration_layers)) + + if config["units"] > 1: + model.add(keras.layers.Lambda( + lambda x: tf.reduce_mean(x, axis=1, keepdims=True))) + + model.compile( + loss=keras.losses.mean_squared_error, + optimizer=config["optimizer"](learning_rate=config["learning_rate"])) + + training_data = (training_inputs, training_labels, training_inputs) + + loss = test_utils.run_training_loop( + config=config, + training_data=training_data, + keras_model=model, + plot_path=plot_path) + + assetion_ops = [] + for calibration_layer in calibration_layers: + assetion_ops.extend( + calibration_layer.assert_constraints( + eps=config["constraint_assertion_eps"])) + if not tf.executing_eagerly() and assetion_ops: + tf.compat.v1.keras.backend.get_session().run(assetion_ops) + + return loss + + def _InverseAndTrain(self, config): + """Changes monotonicity directions to opposite and trains model.""" + inversed_config = dict(config) + inversed_config["y_function"] = lambda x: -config["y_function"](x) + + inversed_config["output_max"] = config["output_min"] + if inversed_config["output_max"] is not None: + inversed_config["output_max"] = inversed_config["output_max"] * -1.0 + + inversed_config["output_min"] = config["output_max"] + if inversed_config["output_min"] is not None: + inversed_config["output_min"] = inversed_config["output_min"] * -1.0 + + inversed_config["clamp_min"] = config["clamp_max"] + inversed_config["clamp_max"] = config["clamp_min"] + inversed_config["monotonicity"] = -pwl_lib.canonicalize_monotonicity( + config["monotonicity"]) 
+ inversed_config["convexity"] = -pwl_lib.canonicalize_convexity( + config["convexity"]) + inversed_loss = self._TrainModel(inversed_config) + return inversed_loss + + @parameterized.parameters( + (1, False, 0.001022), + (3, False, 0.000543), + (3, True, 0.000987), + ) + def testUnconstrainedNoMissingValue(self, units, one_d_input, expected_loss): + if self._disable_all: + return + config = { + "units": units, + "one_d_input": one_d_input, + "num_training_records": 100, + "num_training_epoch": 2000, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 0.15, + "x_generator": self._ScatterXUniformly, + "y_function": self._SmallWaves, + "monotonicity": 0, + "num_keypoints": 21, + "input_min": -1.0, + "input_max": 1.0, + "output_min": None, + "output_max": None, + } + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + if units > 1 and not one_d_input: + config["use_multi_calibration_layer"] = True + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + + @parameterized.parameters( + (1, None, 0.000858), + (1, 0.5, 0.637769), + (3, None, 0.000471), + (3, 0.5, 0.190513), + ) + def testUnconstrainedWithMissingValue(self, units, missing_output_value, + expected_loss): + if self._disable_all: + return + config = { + "units": units, + "num_training_records": 100, + "num_training_epoch": 2000, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 0.15, + "x_generator": self._ScatterXUniformly, + "y_function": self._SmallWaves, + "monotonicity": 0, + "num_keypoints": 21, + "input_min": -1.0, + "input_max": 1.0, + "output_min": None, + "output_max": None, + "impute_missing": True, + "missing_input_value": -1.2, + "missing_output_value": missing_output_value, + "missing_probability": 0.1, + } + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + config["use_separate_missing"] = True + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + if units > 1: + config["use_multi_calibration_layer"] = True + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + + @parameterized.parameters( + (1, -1.5, 1.5, tf.keras.optimizers.SGD, 2100, 0.002957), + (1, -1.5, 1.5, tf.keras.optimizers.Adagrad, 2100, 0.002798), + # TODO: Something really weird is going on here with Adam + # optimizer in case when num_training_epoch is exactly 2010. + # Test verifies result with 2100 epochs which behaves as expected. 
+ (1, -1.5, 1.5, tf.keras.optimizers.Adam, 2100, 0.000769), + (1, -0.5, 0.5, tf.keras.optimizers.SGD, 200, 0.011483), + (1, -0.5, 0.5, tf.keras.optimizers.Adagrad, 200, 0.011645), + (1, -0.5, 0.5, tf.keras.optimizers.Adam, 200, 0.011116), + (3, -1.5, 1.5, tf.keras.optimizers.Adagrad, 2100, 0.001759), + (3, -0.5, 0.5, tf.keras.optimizers.Adagrad, 200, 0.005986), + ) + def testNonMonotonicFunction(self, units, output_min, output_max, optimizer, + num_training_epoch, expected_loss): + if self._disable_all: + return + config = { + "units": units, + "num_training_records": 100, + "num_training_epoch": 2100, + "optimizer": tf.keras.optimizers.SGD, + "learning_rate": 0.015, + "x_generator": self._ScatterXUniformly, + "y_function": self._SmallWaves, + "monotonicity": 0, + "num_keypoints": 21, + "input_min": -1.0, + "input_max": 1.0, + "output_min": -1.5, + "output_max": 1.5, + "clamp_min": False, + "clamp_max": False, + } + config["output_min"] = output_min + config["output_max"] = output_max + config["optimizer"] = optimizer + config["num_training_epoch"] = num_training_epoch + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + if units > 1: + config["use_multi_calibration_layer"] = True + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + + @parameterized.parameters( + (1, -1.5, 0.287357), + (1, 1.5, 0.287357), + (3, -1.5, 0.122801), + (3, 1.5, 0.106150), + ) + # Since function is symmetric result should be same for both values above. + def testBoundsForMissing(self, units, missing_input_value, expected_loss): + if self._disable_all: + return + config = { + "units": units, + "num_training_records": 100, + "num_training_epoch": 200, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 0.15, + "x_generator": self._ScatterXUniformly, + "y_function": self._SmallWaves, + "monotonicity": 1, + "num_keypoints": 21, + "input_min": -1.0, + "input_max": 1.0, + "output_min": -2.0, + "output_max": 2.0, + "clamp_min": False, + "clamp_max": True, + "impute_missing": True, + "missing_probability": 0.1, + } + config["missing_input_value"] = missing_input_value + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + if units > 1: + config["use_multi_calibration_layer"] = True + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + + @parameterized.parameters( + (1, None, None, 0.002505), + (1, None, 1.21, 0.008076), + (1, None, 1.6, 0.000251), + (1, None, 2.0, 0.001107), + (1, 0.5, None, 0.000790), + (1, 0.5, 1.21, 0.008353), + (1, 0.5, 1.6, 0.000685), + (1, 0.5, 2.0, 0.000694), + (1, 0.9, None, 0.000143), + (1, 0.9, 1.21, 0.008108), + (1, 0.9, 1.6, 0.000125), + (1, 0.9, 2.0, 0.000120), + (1, 1.2, None, 0.025762), + (1, 1.2, 1.21, 0.026069), + (1, 1.2, 1.6, 0.025240), + (1, 1.2, 2.0, 0.024802), + (3, None, None, 0.003268), + (3, None, 1.21, 0.003901), + (3, None, 1.6, 0.000897), + (3, None, 2.0, 0.002608), + (3, 0.5, None, 0.000945), + (3, 0.5, 1.21, 0.004830), + (3, 0.5, 1.6, 0.000945), + (3, 0.5, 2.0, 0.000923), + (3, 0.9, None, 0.000318), + (3, 0.9, 1.21, 0.004215), + (3, 0.9, 1.6, 0.000335), + (3, 0.9, 2.0, 0.000297), + (3, 1.2, None, 0.011354), + (3, 1.2, 1.21, 0.011354), + (3, 1.2, 1.6, 0.011354), + (3, 1.2, 2.0, 0.011354), + ) + def testAllBoundsWithoutMonotonicityConstraints(self, units, output_min, + output_max, expected_loss): + if self._disable_all: + return + config = { + "units": units, + 
"num_training_records": 100, + "num_training_epoch": 200, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 0.15, + "x_generator": self._ScatterXUniformly, + "y_function": self._SmallWavesPlusOne, + "monotonicity": 0, + "num_keypoints": 21, + "input_min": 0.1, + "input_max": 0.8, + "clamp_min": False, + "clamp_max": False, + } + config["output_min"] = output_min + config["output_max"] = output_max + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + if units > 1: + config["use_multi_calibration_layer"] = True + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + + @parameterized.parameters( + (1, False, tf.keras.optimizers.SGD, 0.004715), + (1, False, tf.keras.optimizers.Adagrad, 0.003820), + (1, False, tf.keras.optimizers.Adam, 0.002797), + (1, True, tf.keras.optimizers.SGD, 0.004427), + (1, True, tf.keras.optimizers.Adagrad, 0.004084), + # Adam is doing terrible when required to stretch monotonic function + # even if bounds are proper. + (1, True, tf.keras.optimizers.Adam, 0.065664), + (3, False, tf.keras.optimizers.Adagrad, 0.002371), + (3, True, tf.keras.optimizers.Adagrad, 0.002670), + ) + def testMonotonicProperBounds(self, units, is_clamped, optimizer, + expected_loss): + if self._disable_all: + return + config = { + "units": units, + "num_training_records": 100, + "num_training_epoch": 400, + "optimizer": optimizer, + "learning_rate": 0.015, + "x_generator": self._ScatterXUniformly, + "y_function": self._SmallWaves, + "monotonicity": "increasing", + "num_keypoints": 21, + "input_min": -1.0, + "input_max": 1.0, + "output_min": -1.0, + "output_max": 1.0, + "clamp_min": is_clamped, + "clamp_max": is_clamped, + } + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + if units > 1: + config["use_multi_calibration_layer"] = True + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + + @parameterized.parameters( + (1, False, tf.keras.optimizers.SGD, 0.15, 0.009563), + (1, False, tf.keras.optimizers.Adagrad, 0.015, 0.011117), + (1, False, tf.keras.optimizers.Adam, 0.015, 0.015356), + (1, True, tf.keras.optimizers.SGD, 0.15, 0.009563), + (1, True, tf.keras.optimizers.Adagrad, 0.015, 0.011117), + # Adam squeezes monotonic function just slightly worse than adagrad. 
+ (1, True, tf.keras.optimizers.Adam, 0.015, 0.015189), + (3, False, tf.keras.optimizers.Adagrad, 0.015, 0.006057), + (3, True, tf.keras.optimizers.Adagrad, 0.015, 0.006049), + ) + def testMonotonicNarrowBounds(self, units, is_clamped, optimizer, + learning_rate, expected_loss): + if self._disable_all: + return + config = { + "units": units, + "num_training_records": 100, + "num_training_epoch": 200, + "optimizer": optimizer, + "learning_rate": learning_rate, + "x_generator": self._ScatterXUniformly, + "y_function": self._SmallWaves, + "monotonicity": 1, + "num_keypoints": 21, + "input_min": -1.0, + "input_max": 1.0, + "output_min": -0.5, + "output_max": 0.5, + "clamp_min": is_clamped, + "clamp_max": is_clamped, + } + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + if units > 1: + config["use_multi_calibration_layer"] = True + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + + @parameterized.parameters( + (1, False, tf.keras.optimizers.SGD, 0.005920), + (1, False, tf.keras.optimizers.Adagrad, 0.006080), + (1, False, tf.keras.optimizers.Adam, 0.002914), + (1, True, tf.keras.optimizers.SGD, 0.013836), + (1, True, tf.keras.optimizers.Adagrad, 0.066928), + # Adam is doing terrible when required to stretch monotonic function. + (1, True, tf.keras.optimizers.Adam, 0.230402), + (3, False, tf.keras.optimizers.Adagrad, 0.004891), + (3, True, tf.keras.optimizers.Adagrad, 0.021490), + ) + def testMonotonicWideBounds(self, units, is_clamped, optimizer, + expected_loss): + if self._disable_all: + return + config = { + "units": units, + "num_training_records": 100, + "num_training_epoch": 400, + "optimizer": optimizer, + "learning_rate": 0.015, + "x_generator": self._ScatterXUniformly, + "y_function": self._SmallWaves, + "monotonicity": 1, + "num_keypoints": 21, + "input_min": -1.0, + "input_max": 1.0, + "output_min": -1.5, + "output_max": 1.5, + "clamp_min": is_clamped, + "clamp_max": is_clamped, + } + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + if units > 1: + config["use_multi_calibration_layer"] = True + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + + @parameterized.parameters( + (1, None, None, False, False, 0.003744), + (1, None, None, False, True, 0.003744), + (1, None, 1.6, True, False, 0.001456), + (1, None, 1.6, True, True, 0.001465), + (1, None, 2.0, False, False, 0.001712), + (1, None, 2.0, False, True, 0.01623), + (1, None, 2.0, True, False, 0.001712), + (1, None, 2.0, True, True, 0.01623), + (1, 0.5, None, False, False, 0.002031), + (1, 0.5, None, False, True, 0.002031), + (1, 0.5, None, True, False, 0.003621), + (1, 0.5, None, True, True, 0.003621), + (1, None, None, True, False, 0.003744), + (1, 0.5, 1.21, False, False, 0.007572), + (1, 0.5, 1.21, False, True, 0.007572), + (1, 0.5, 1.21, True, False, 0.009876), + (1, 0.5, 1.21, True, True, 0.009876), + (1, 0.5, 1.6, False, False, 0.001916), + (1, 0.5, 1.6, False, True, 0.001737), + (1, 0.5, 1.6, True, False, 0.003103), + (1, 0.5, 1.6, True, True, 0.002692), + (1, 0.5, 2.0, False, False, 0.001873), + (1, 0.5, 2.0, False, True, 0.003333), + (1, None, None, True, True, 0.003744), + (1, 0.5, 2.0, True, False, 0.003315), + (1, 0.5, 2.0, True, True, 0.004289), + (1, 0.9, None, False, False, 0.00151), + (1, 0.9, None, False, True, 0.00151), + (1, 0.9, None, True, False, 0.001552), + (1, 0.9, None, True, True, 0.001552), + (1, 
0.9, 1.21, False, False, 0.005387), + (1, 0.9, 1.21, False, True, 0.005387), + (1, 0.9, 1.21, True, False, 0.005427), + (1, 0.9, 1.21, True, True, 0.005427), + (1, None, 1.21, False, False, 0.005366), + (1, 0.9, 1.6, False, False, 0.0015), + (1, 0.9, 1.6, False, True, 0.001454), + (1, 0.9, 1.6, True, False, 0.001546), + (1, 0.9, 1.6, True, True, 0.001514), + (1, 0.9, 2.0, False, False, 0.001501), + (1, 0.9, 2.0, False, True, 0.003067), + (1, 0.9, 2.0, True, False, 0.001547), + (1, 0.9, 2.0, True, True, 0.00312), + (1, 1.2, None, False, False, 0.021835), + (1, 1.2, None, False, True, 0.021835), + (1, None, 1.21, False, True, 0.005366), + (1, 1.2, None, True, False, 0.021835), + (1, 1.2, None, True, True, 0.021835), + (1, 1.2, 1.21, False, False, 0.025733), + (1, 1.2, 1.21, False, True, 0.025733), + (1, 1.2, 1.21, True, False, 0.025733), + (1, 1.2, 1.21, True, True, 0.025733), + (1, 1.2, 1.6, False, False, 0.021834), + (1, 1.2, 1.6, False, True, 0.021967), + (1, 1.2, 1.6, True, False, 0.021834), + (1, 1.2, 1.6, True, True, 0.021967), + (1, None, 1.21, True, False, 0.005366), + (1, 1.2, 2.0, False, False, 0.021834), + (1, 1.2, 2.0, False, True, 0.023642), + (1, 1.2, 2.0, True, False, 0.021834), + (1, 1.2, 2.0, True, True, 0.023642), + (1, None, 1.21, True, True, 0.005366), + (1, None, 1.6, False, False, 0.001456), + (1, None, 1.6, False, True, 0.001465), + (3, None, None, False, False, 0.003969), + (3, None, None, False, True, 0.003969), + (3, 0.5, None, True, False, 0.003125), + (3, 0.5, None, True, True, 0.003125), + (3, None, None, True, False, 0.003969), + (3, 0.5, 1.21, False, False, 0.003676), + (3, 0.5, 1.21, False, True, 0.003676), + (3, 0.5, 1.21, True, False, 0.006550), + (3, 0.5, 1.21, True, True, 0.006550), + (3, 0.5, 1.6, False, False, 0.001246), + (3, 0.5, 1.6, False, True, 0.001000), + (3, 0.5, 1.6, True, False, 0.002775), + (3, None, 1.6, True, False, 0.000662), + (3, 0.5, 1.6, True, True, 0.002720), + (3, 0.5, 2.0, False, False, 0.001272), + (3, 0.5, 2.0, False, True, 0.001779), + (3, None, None, True, True, 0.003969), + (3, 0.5, 2.0, True, False, 0.002852), + (3, 0.5, 2.0, True, True, 0.003496), + (3, 0.9, None, False, False, 0.000597), + (3, 0.9, None, False, True, 0.000597), + (3, 0.9, None, True, False, 0.000678), + (3, 0.9, None, True, True, 0.000678), + (3, None, 1.6, True, True, 0.000640), + (3, 0.9, 1.21, False, False, 0.002630), + (3, 0.9, 1.21, False, True, 0.002630), + (3, 0.9, 1.21, True, False, 0.002906), + (3, 0.9, 1.21, True, True, 0.002906), + (3, None, 1.21, False, False, 0.002565), + (3, 0.9, 1.6, False, False, 0.000575), + (3, 0.9, 1.6, False, True, 0.000520), + (3, 0.9, 1.6, True, False, 0.000648), + (3, 0.9, 1.6, True, True, 0.000606), + (3, 0.9, 2.0, False, False, 0.000556), + (3, None, 2.0, False, False, 0.000901), + (3, 0.9, 2.0, False, True, 0.001230), + (3, 0.9, 2.0, True, False, 0.000636), + (3, 0.9, 2.0, True, True, 0.001314), + (3, 1.2, None, False, False, 0.010638), + (3, 1.2, None, False, True, 0.010638), + (3, None, 1.21, False, True, 0.002565), + (3, 1.2, None, True, False, 0.010638), + (3, 1.2, None, True, True, 0.010638), + (3, 1.2, 1.21, False, False, 0.011300), + (3, 1.2, 1.21, False, True, 0.011309), + (3, None, 2.0, False, True, 0.003166), + (3, 1.2, 1.21, True, False, 0.011300), + (3, 1.2, 1.21, True, True, 0.011309), + (3, 1.2, 1.6, False, False, 0.010631), + (3, 1.2, 1.6, False, True, 0.012681), + (3, 1.2, 1.6, True, False, 0.010631), + (3, 1.2, 1.6, True, True, 0.012681), + (3, None, 1.21, True, False, 0.002565), + (3, 1.2, 2.0, 
False, False, 0.010627), + (3, 1.2, 2.0, False, True, 0.016435), + (3, 1.2, 2.0, True, False, 0.010627), + (3, None, 2.0, True, False, 0.000901), + (3, 1.2, 2.0, True, True, 0.016435), + (3, None, 1.21, True, True, 0.002565), + (3, None, 1.6, False, False, 0.000662), + (3, None, 1.6, False, True, 0.000640), + (3, None, 2.0, True, True, 0.003166), + (3, 0.5, None, False, False, 0.001334), + (3, 0.5, None, False, True, 0.001334), + ) + def testAllBoundsAndMonotonicityDirection(self, units, output_min, output_max, + clamp_min, clamp_max, + expected_loss): + if self._disable_all: + return + config = { + "units": units, + "num_training_records": 100, + "num_training_epoch": 200, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 0.15, + "x_generator": self._ScatterXUniformly, + "y_function": self._SmallWavesPlusOne, + "monotonicity": 1, + "num_keypoints": 21, + "input_min": 0.1, + "input_max": 0.8, + "output_min": output_min, + "output_max": output_max, + "clamp_min": clamp_min, + "clamp_max": clamp_max, + } + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + self.assertAlmostEqual( + loss, self._InverseAndTrain(config), delta=self._small_eps) + if units > 1: + config["use_multi_calibration_layer"] = True + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + self.assertAlmostEqual( + loss, self._InverseAndTrain(config), delta=self._small_eps) + + @parameterized.parameters( + (1, 1, 0.018919), + (1, -1, 0.019434), + (3, "convex", 0.008592), + (3, "concave", 0.01134), + ) + def testConvexitySimple(self, units, convexity, expected_loss): + # No constraints other than convexity. + if self._disable_all: + return + config = { + "units": units, + "num_training_records": 100, + "num_training_epoch": 120, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._ScatterXUniformly, + "y_function": self._SmallWaves, + "monotonicity": "none", + "convexity": convexity, + "num_keypoints": 21, + "input_min": -1.0, + "input_max": 1.0, + "output_min": None, + "output_max": None, + "num_projection_iterations": 18, + } + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + if units > 1: + config["use_multi_calibration_layer"] = True + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + + @parameterized.parameters( + (1, 1, 0.006286), + (1, -1, 0.078076), + (3, 1, 0.002941), + (3, -1, 0.032497), + ) + def testConvexityNonUniformKeypoints(self, units, convexity, expected_loss): + # No constraints other than convexity. 
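+    # The keypoints below are deliberately non-uniform, so segment lengths
+    # differ and the length-weighted pair projection of the convexity
+    # constraint is exercised; with uniform keypoints it would reduce to
+    # simple averaging of adjacent heights.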
+ if self._disable_all: + return + + config = { + "units": units, + "num_training_records": 100, + "num_training_epoch": 200, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 1.0, + "x_generator": self._ScatterXUniformly, + "y_function": self._WavyParabola, + "monotonicity": 0, + "convexity": convexity, + "input_keypoints": [-1.0, -0.9, -0.3, -0.2, 0.0, 0.3, 0.31, 0.35, 1.0], + "output_min": None, + "output_max": None, + } + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + if units > 1: + config["use_multi_calibration_layer"] = True + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + + @parameterized.parameters( + (1, 2, 0.033706), + (1, 3, 0.006485), + (1, 4, 0.005128), + (1, 5, 0.004878), + (1, 6, 0.005083), + (1, 7, 0.004860), + (3, 2, 0.013585), + (3, 3, 0.003311), + (3, 4, 0.002633), + (3, 5, 0.001909), + (3, 6, 0.001822), + (3, 7, 0.001599), + ) + def testConvexityDifferentNumKeypoints(self, units, num_keypoints, + expected_loss): + # No constraints other than convexity. + if self._disable_all: + return + + config = { + "units": units, + "num_training_records": 100, + "num_training_epoch": 120, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 0.3, + "x_generator": self._ScatterXUniformly, + "y_function": self._WavyParabola, + "monotonicity": 0, + "convexity": 1, + "num_keypoints": num_keypoints, + "input_min": -0.8, + "input_max": 0.8, + "output_min": None, + "output_max": None, + } + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + if units > 1: + config["use_multi_calibration_layer"] = True + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + + @parameterized.parameters( + (1, "increasing", None, 0.055837), + (1, "decreasing", None, 0.046657), + (1, "none", 0.0, 0.027777), + (1, "increasing", 0.0, 0.065516), + (1, "decreasing", 0.0, 0.057453), + (3, "increasing", None, 0.022467), + (3, "decreasing", None, 0.019012), + (3, "none", 0.0, 0.014693), + (3, "increasing", 0.0, 0.026284), + (3, "decreasing", 0.0, 0.025498), + ) + def testConvexityWithMonotonicityAndBounds(self, units, monotonicity, + output_max, expected_loss): + if self._disable_all: + return + + config = { + "units": units, + "num_training_records": 100, + "num_training_epoch": 120, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 0.5, + "x_generator": self._ScatterXUniformly, + "y_function": self._WavyParabola, + "monotonicity": monotonicity, + "convexity": 1, + "num_keypoints": 21, + "input_min": -1.0, + "input_max": 1.0, + "output_min": None, + "output_max": output_max, + "num_projection_iterations": 8, + } + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + self.assertAlmostEqual( + loss, self._InverseAndTrain(config), delta=self._small_eps) + if units > 1: + config["use_multi_calibration_layer"] = True + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + self.assertAlmostEqual( + loss, self._InverseAndTrain(config), delta=self._small_eps) + + @parameterized.parameters( + ([-1.0, -0.8, 0.0, 0.2, 0.8, 1.0],), + (np.array([-1.0, -0.8, 0.0, 0.2, 0.8, 1.0]),), + ) + def testInputKeypoints(self, keypoints): + if self._disable_all: + return + config = { + "num_training_records": 100, + "num_training_epoch": 200, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 
0.15, + "x_generator": self._ScatterXUniformly, + "y_function": self._SmallWaves, + "monotonicity": 0, + "input_keypoints": keypoints, + "output_min": None, + "output_max": None, + } + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.009650, delta=self._loss_eps) + + @parameterized.parameters( + (1, None, 600, 0.002058), + (1, ("laplacian", 0.01, 0.0), 420, 0.040492), + (1, ("hessian", 0.01, 0.01), 300, 0.040932), + (1, ("wrinkle", 0.01, 0.01), 300, 0.027430), + (3, None, 600, 0.002150), + (3, ("laplacian", 0.01, 0.0), 420, 0.096667), + (3, ("hessian", 0.01, 0.01), 300, 0.092306), + (3, ("wrinkle", 0.01, 0.01), 300, 0.064053), + ) + def testIsCyclic(self, units, regularizer, num_training_epoch, expected_loss): + if self._disable_all: + return + config = { + "units": units, + "num_training_records": 100, + "num_training_epoch": num_training_epoch, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 0.15, + "x_generator": self._ScatterXUniformlyIncludeBounds, + "y_function": self._SinCycle, + "monotonicity": 0, + "input_min": 0.0, + "input_max": 24.0, + "num_keypoints": 10, + "is_cyclic": True, + "kernel_regularizer": regularizer, + "output_min": None, + "output_max": None, + } + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + if units > 1: + config["use_multi_calibration_layer"] = True + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + + @parameterized.parameters( + (1, "equal_heights", 0.332572), + (1, "equal_slopes", 0.476452), + (3, "equal_heights", 0.271896), + (3, "equal_slopes", 0.356754), + ) + def testInitializer(self, units, initializer, expected_loss): + if self._disable_all: + return + config = { + "units": units, + "num_training_records": 100, + # 0 training epochs to see pure output of initializer. + "num_training_epoch": 0, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 0.15, + "x_generator": self._ScatterXUniformly, + "y_function": self._SmallWaves, + "monotonicity": 0, + "input_keypoints": [-1.0, -0.8, 0.0, 0.2, 0.8, 1.0], + "output_min": -1.0, + "output_max": 2.0, + "initializer": initializer, + } + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + if units > 1: + config["use_multi_calibration_layer"] = True + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps) + + # TODO: this test is only using the first piece of the PWL. 
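+  # The tuple regularizers below appear to follow the (name, l1, l2)
+  # convention of the PWL calibration layer; the mixed-case "Hessian" entries
+  # suggest that names are matched case-insensitively.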
+ @parameterized.parameters( + (1, ("laplacian", 0.01, 0.001), 0.091, 0.089631), + (1, ("Hessian", 0.01, 0.001), 0.035, 0.033504), + (1, ("wrinkle", 0.01, 0.001), 0.011, 0.007018), + # Standard Keras regularizer: + (1, keras.regularizers.l1_l2(l1=0.01, l2=0.001), 0.091, 0.089906), + # List of regularizers: + (1, [("Hessian", 0.01, 0.001), + keras.regularizers.l1_l2(l1=0.01, l2=0.001)], 0.126, 0.122192), + (3, ("laplacian", 0.01, 0.001), 0.273, 0.263244), + (3, ("Hessian", 0.01, 0.001), 0.105, 0.097368), + (3, ("wrinkle", 0.01, 0.001), 0.033, 0.013650), + # Standard Keras regularizer: + (3, keras.regularizers.l1_l2(l1=0.01, l2=0.001), 0.273, 0.265924), + # List of regularizers: + (3, [("Hessian", 0.01, 0.001), + keras.regularizers.l1_l2(l1=0.01, l2=0.001)], 0.378, 0.354917), + ) + def testRegularizers(self, units, regularizer, pure_reg_loss, training_loss): + if self._disable_all: + return + keypoints = [0.0, 1.0, 2.0, 3.0] + pwl_weights = [0.0, 1.0, 2.0, 4.0] + multi_pwl_weights = [[w] * units for w in pwl_weights] + # Keypoint outputs which correspond to weights: [0.0, 1.0, 3.0, 7.0] + config = { + "units": units, + "num_training_records": 100, + "num_training_epoch": 0, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 0.15, + "x_generator": self._ScatterXUniformly, + "input_keypoints": keypoints, + "y_function": self._GenPWLFunction(keypoints, multi_pwl_weights), + # Initializer exactly matches target function. + "initializer": + lambda shape, dtype: tf.constant(multi_pwl_weights, shape=shape), + "kernel_regularizer": regularizer, + } # pyformat: disable + loss = self._TrainModel(config) + # This loss is pure regularization loss because initializer matches target + # function and there was 0 training epochs. + self.assertAlmostEqual(loss, pure_reg_loss, delta=self._loss_eps) + + config["num_training_epoch"] = 20 + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, training_loss, delta=self._loss_eps) + if units > 1: + config["use_multi_calibration_layer"] = True + config["initializer"] = ( + lambda shape, dtype: tf.constant(pwl_weights, shape=shape)) + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, training_loss, delta=self._loss_eps) + + def testAssertMonotonicity(self): + if self._disable_all: + return + decreasing_initializer = pwl_calibraion.UniformOutputInitializer( + output_min=0.0, output_max=1.0, monotonicity=-1) + # Specify decreasing initializer and do 0 training iterations so no + # projections are being executed. + config = { + "num_training_records": 100, + "num_training_epoch": 0, + "optimizer": tf.keras.optimizers.Adagrad, + "learning_rate": 0.15, + "x_generator": self._ScatterXUniformly, + "y_function": self._SmallWaves, + "monotonicity": 0, + "num_keypoints": 21, + "input_min": 0.0, + "input_max": 1.0, + "output_min": 0.0, + "output_max": 1.0, + "initializer": decreasing_initializer, + } + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.347888, delta=self._loss_eps) + + # We have decreasing initializer so with 0 trainig steps monotonicity is + # violated. + with self.assertRaises(tf.errors.InvalidArgumentError): + config["monotonicity"] = 1 + loss = self._TrainModel(config) + + # Now set upper bound bigger than necessary. Everything should be fine... + config["monotonicity"] = 0 + config["output_max"] = 1.5 + loss = self._TrainModel(config) + self.assertAlmostEqual(loss, 0.347888, delta=self._loss_eps) + + # ... until we require to clamp max. 
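+    # With clamp_max=True some output must reach output_max exactly; the
+    # calibrator was initialized to a maximum of 1.0, so an output_max of 1.5
+    # is never attained and the constraint assertion fires.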
+    with self.assertRaises(tf.errors.InvalidArgumentError):
+      config["clamp_max"] = True
+      loss = self._TrainModel(config)
+
+
+if __name__ == "__main__":
+  tf.test.main()
diff --git a/tensorflow_lattice/python/test_utils.py b/tensorflow_lattice/python/test_utils.py
new file mode 100644
index 0000000..c18a566
--- /dev/null
+++ b/tensorflow_lattice/python/test_utils.py
@@ -0,0 +1,276 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helpers to train simple models for tests and print debug output."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import time
+from . import visualization
+from absl import logging
+import numpy as np
+
+
+class TimeTracker(object):
+  """Tracks time.
+
+  Keeps track of time spent in its scope and, on exit from the scope, appends
+  it to 'list_to_append', divided by 'num_steps' if provided.
+
+  Example:
+    training_step_times = []
+    with TimeTracker(training_step_times, num_steps=num_epochs):
+      model.fit(... epochs=num_epochs ...)
+    print(np.median(training_step_times))
+  """
+
+  def __init__(self, list_to_append, num_steps=1):
+    self._list_to_append = list_to_append
+    self._num_steps = float(num_steps)
+
+  def __enter__(self):
+    self._start_time = time.time()
+    return self
+
+  def __exit__(self, unused_type, unused_value, unused_traceback):
+    duration = time.time() - self._start_time
+    self._list_to_append.append(
+        duration / self._num_steps if self._num_steps else 0.0)
+
+
+def run_training_loop(config,
+                      training_data,
+                      keras_model,
+                      plot_path=None,
+                      input_dtype=np.float32,
+                      label_dtype=np.float32):
+  """Trains a model and prints debug info.
+
+  Args:
+    config: dictionary of test case parameters. See tests for TensorFlow
+      Lattice layers.
+    training_data: triple (training_inputs, labels, raw_training_inputs) where
+      training_inputs and labels are proper data to train the model passed via
+      other parameters, and raw_training_inputs are a representation of
+      training_inputs for visualization.
+    keras_model: Keras model to train on training_data.
+    plot_path: if specified, a string file name where the model output vs
+      ground truth visualisation is saved as a png. Supported only for 1-d and
+      2-d inputs. For visualisation of 2-d inputs to work, raw_training_inputs
+      must be a mesh grid.
+    input_dtype: dtype for input conversion.
+    label_dtype: dtype for label conversion.
+
+  Returns:
+    Loss measured on the training data.
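+
+  Example (an illustrative sketch, not part of the original patch; assumes
+  `tensorflow` is imported as `tf` and a simple 1-d regression model):
+
+    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
+    model.compile(loss="mse", optimizer="adam")
+    x = np.linspace(0.0, 1.0, num=100).reshape(-1, 1)
+    y = np.sin(x)
+    config = {"num_training_epoch": 100}
+    loss = run_training_loop(config, (x, y, x), model)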
+ """ + (training_inputs, training_labels, raw_training_inputs) = training_data + np_training_inputs = np.asarray(training_inputs).astype(input_dtype) + np_training_labels = np.asarray(training_labels).astype(label_dtype) + + logging.info(" {0: <10}{1: <10}".format("it", "Loss")) + + num_steps = 10 + training_step_times = [] + for step in range(num_steps): + begin = (config["num_training_epoch"] * step) // num_steps + end = (config["num_training_epoch"] * (step + 1)) // num_steps + num_epochs = end - begin + if num_epochs == 0: + continue + + loss = keras_model.evaluate(np_training_inputs, np_training_labels, + batch_size=len(np_training_inputs), + verbose=0) + with TimeTracker(training_step_times, num_steps=num_epochs): + keras_model.fit(np_training_inputs, np_training_labels, + batch_size=len(np_training_inputs), + epochs=num_epochs, + verbose=0) + logging.info("{0: <10}{1: <10,.6f}".format(begin, loss)) + # End of: 'for step in range(num_steps):' + + loss = keras_model.evaluate(np_training_inputs, np_training_labels, + batch_size=len(np_training_inputs), + verbose=0) + logging.info("Final loss: %f", loss) + + if training_step_times: + logging.info("Median training step time: %f", + np.median(training_step_times)) + + if plot_path: + predictions = keras_model.predict(np_training_inputs) + plots = { + "Ground truth": training_labels, + "Model": predictions + } + visualization.plot_outputs(inputs=raw_training_inputs, + outputs_map=plots, + file_path=plot_path) + return loss + + +def two_dim_mesh_grid(num_points, x_min, y_min, x_max, y_max): + """Generates uniform 2-d mesh grid for 3-d surfaces visualisation via pyplot. + + Uniformly distributes 'num_points' within rectangle: + (x_min, y_min) - (x_max, y_max) + 'num_points' should be such that uniform distribution is possible. In other + words there should exist such integers 'x_points' and 'y_points' that: + - x_points * y_points == num_points + - x_points / y_points == (x_max - x_min) / (y_max - y_min) + + Args: + num_points: number of points in the grid. + x_min: bounds of the grid. + y_min: bounds of the grid. + x_max: bounds of the grid. + y_max: bounds of the grid. + + Returns: + Tuple containing 2 numpy arrays which represent X and Y coordinates of mesh + grid + + Raises: + ValueError: if it's impossible to uniformly distribute 'num_points' across + specified grid. + + """ + x_size = x_max - x_min + y_size = y_max - y_min + x_points = (num_points * x_size / y_size)**0.5 + y_points = num_points / x_points + + eps = 1e-7 + is_int = lambda x: abs(x - int(x + eps)) < eps + if not is_int(x_points) or not is_int(y_points): + raise ValueError("Cannot evenly distribute %d points across sides of " + "lengths: %f and %f" % (num_points, x_size, y_size)) + + x_grid = np.linspace(start=x_min, stop=x_max, num=int(x_points + eps)) + y_grid = np.linspace(start=y_min, stop=y_max, num=int(y_points + eps)) + + # Convert list returned by meshgrid() to tuple so we can easily distinguish + # mesh grid vs list of points. + return tuple(np.meshgrid(x_grid, y_grid)) + + +def sample_uniformly(num_points, lower_bounds, upper_bounds): + """Deterministically generates num_point random points within bounds. + + Points will be such that: + lower_bounds[i] <= p[i] <= upper_bounds[i] + + Number of dimensions is defined by lengths of lower_bounds list. + + Args: + num_points: number of points to generate. + lower_bounds: list or tuple of lower bounds. + upper_bounds: list or tuple of upper bounds. + + Returns: + List of generated points. 
+ """ + if len(lower_bounds) != len(upper_bounds): + raise ValueError("Lower and upper bounds must have same length. They are: " + "lower_bounds: %s, upper_bounds: %s" % + (lower_bounds, upper_bounds)) + np.random.seed(41) + x = [] + for _ in range(num_points): + point = [ + lower + np.random.random() * (upper - lower) + for lower, upper in zip(lower_bounds, upper_bounds) + ] + x.append(np.asarray(point)) + return x + + +def get_hypercube_interpolation_fn(coefficients): + """Returns function which does hypercube interpolation. + + This is only for 2^d lattice aka hypercube. + + Args: + coefficients: coefficients of hypercube ordered according to index of + corresponding vertex. + + Returns: + Function which takes d-dimension point and performs hypercube interpolation + with given coefficients. + """ + + def hypercube_interpolation_fn(x): + """Does hypercube interpolation.""" + if 2**len(x) != len(coefficients): + raise ValueError("Number of coefficients(%d) does not correspond to " + "dimension 'x'(%s)" % (len(coefficients), x)) + result = 0.0 + for coefficient_index in range(len(coefficients)): + weight = 1.0 + for input_dimension in range(len(x)): + if coefficient_index & (1 << input_dimension): + # If statement checks whether 'input_dimension' bit of + # 'coefficient_index' is set to 1. + weight *= x[input_dimension] + else: + weight *= (1.0 - x[input_dimension]) + result += coefficients[coefficient_index] * weight + return result + + return hypercube_interpolation_fn + + +def get_linear_lattice_interpolation_fn(lattice_sizes, monotonicities, + output_min, output_max): + """Returns function which does lattice interpolation. + + Returned function matches lattice_layer.LinearInitializer with corresponding + parameters. + + Args: + lattice_sizes: list or tuple of integers which represents lattice sizes. + monotonicities: monotonicity constraints. + output_min: minimum output of linear function. + output_max: maximum output of linear function. + + Returns: + Function which takes d-dimension point and performs lattice interpolation + assuming lattice weights are such that lattice represents linear function + with given output_min and output_max. All monotonic dimesions of this linear + function cotribute with same weight despite of numer of vertices per + dimension. All non monotonic dimensions have weight 0.0. + """ + + def linear_interpolation_fn(x): + """Linear along monotonic dims and 0.0 along non monotonic.""" + result = output_min + num_monotonic_dims = len(monotonicities) - monotonicities.count(0) + if num_monotonic_dims == 0: + local_monotonicities = [1] * len(lattice_sizes) + num_monotonic_dims = len(lattice_sizes) + else: + local_monotonicities = monotonicities + + weight = (output_max - output_min) / num_monotonic_dims + for i in range(len(x)): + if local_monotonicities[i]: + result += x[i] * weight / (lattice_sizes[i] - 1.0) + return result + + return linear_interpolation_fn diff --git a/tensorflow_lattice/python/utils.py b/tensorflow_lattice/python/utils.py new file mode 100644 index 0000000..24cec35 --- /dev/null +++ b/tensorflow_lattice/python/utils.py @@ -0,0 +1,174 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Internal helpers shared by multiple modules in TFL.
+
+Note that this module is not expected to be used by TFL users, and that it is
+not exposed in the TFL package.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+import tensorflow as tf
+
+
+def _topological_sort(key_less_than_values):
+  """Topological sort for monotonicities.
+
+  Args:
+    key_less_than_values: A defaultdict from index to a list of indices, such
+      that for j in key_less_than_values[i] we must have
+      output(i) <= output(j).
+
+  Returns:
+    A topologically sorted list of indices.
+
+  Raises:
+    ValueError: If the monotonicities are circular.
+  """
+  all_values = set()
+  for values in key_less_than_values.values():
+    all_values.update(values)
+
+  q = [k for k in key_less_than_values if k not in all_values]
+  if not q:
+    raise ValueError(
+        "Circular monotonicity constraints: {}".format(key_less_than_values))
+
+  result = []
+  seen = set()
+  while q:
+    v = q[-1]
+    seen.add(v)
+    expand = [x for x in key_less_than_values[v] if x not in seen]
+    if not expand:
+      result = [v] + result
+      q.pop()
+    else:
+      q.append(expand[0])
+
+  return result
+
+
+def _min_projection(weights, sorted_indices, key_less_than_values, step):
+  """Returns an approximate partial min projection with the given step size.
+
+  Args:
+    weights: A list of tensors of shape `(units,)` to be approximately
+      projected based on the monotonicity constraints.
+    sorted_indices: Topologically sorted list of indices based on the
+      monotonicity constraints.
+    key_less_than_values: A defaultdict from index to a list of indices, such
+      that for `j` in `key_less_than_values[i]` we must have
+      `weight[i] <= weight[j]`.
+    step: A value defining if we should apply a full projection (`step == 1`)
+      or a partial projection (`step < 1`).
+
+  Returns:
+    Projected list of tensors.
+  """
+  projected_weights = list(weights)  # copy
+  for i in sorted_indices[::-1]:
+    if key_less_than_values[i]:
+      min_projection = projected_weights[i]
+      for j in key_less_than_values[i]:
+        min_projection = tf.minimum(min_projection, projected_weights[j])
+      if step == 1:
+        projected_weights[i] = min_projection
+      else:
+        projected_weights[i] = (
+            step * min_projection + (1 - step) * projected_weights[i])
+  return projected_weights
+
+
+def _max_projection(weights, sorted_indices, key_greater_than_values, step):
+  """Returns an approximate partial max projection with the given step size.
+
+  Args:
+    weights: A list of tensors of shape `(units,)` to be approximately
+      projected based on the monotonicity constraints.
+    sorted_indices: Topologically sorted list of indices based on the
+      monotonicity constraints.
+    key_greater_than_values: A defaultdict from index to a list of indices,
+      indicating that for index `j` in `key_greater_than_values[i]` we must
+      have `weight[i] >= weight[j]`.
+    step: A value defining if we should apply a full projection (`step == 1`)
+      or a partial projection (`step < 1`).
+
+  Returns:
+    Projected list of tensors.
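+
+  Example (illustrative sketch, not part of the original patch):
+    # Enforce weights[0] >= weights[1]; a full projection (step=1) turns
+    # [1., 2.] into [2., 2.], while step=0.5 would yield [1.5, 2.].
+    projected = _max_projection(
+        [tf.constant(1.0), tf.constant(2.0)],
+        sorted_indices=[1, 0],
+        key_greater_than_values={0: [1], 1: []},
+        step=1)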
+ """ + projected_weights = list(weights) # copy + for i in sorted_indices: + if key_greater_than_values[i]: + max_projection = projected_weights[i] + for j in key_greater_than_values[i]: + max_projection = tf.maximum(max_projection, projected_weights[j]) + if step == 1: + projected_weights[i] = max_projection + else: + projected_weights[i] = ( + step * max_projection + (1 - step) * projected_weights[i]) + return projected_weights + + +def approximately_project_categorical_partial_monotonicities(weights, + monotonicities): + """Returns an approximation L2 projection for categorical monotonicities. + + Categorical monotonocities are monotonicity constraints applied to the real + values that are mapped from categorical inputs. Each monotonicity constraint + is specified by a pair of categorical input indices. The projection is also + used to constrain pairs of coefficients in linear models. + + Args: + weights: Tensor of weights to be approximately projected based on the + monotonicity constraints. + monotonicities: List of pairs of indices `(i, j)`, indicating constraint + `weights[i] <= weights[j]`. + """ + key_less_than_values = collections.defaultdict(list) + key_greater_than_values = collections.defaultdict(list) + for i, j in monotonicities: + key_less_than_values[i].append(j) + key_greater_than_values[j].append(i) + + sorted_indices = _topological_sort(key_less_than_values) + + projected_weights = tf.unstack(weights) + + # A 0.5 min projection followed by a full max projection. + projected_weights_min_max = _min_projection(projected_weights, + sorted_indices, + key_less_than_values, 0.5) + projected_weights_min_max = _max_projection(projected_weights_min_max, + sorted_indices, + key_greater_than_values, 1) + projected_weights_min_max = tf.stack(projected_weights_min_max) + + # A 0.5 max projection followed by a full min projection. + projected_weights_max_min = _max_projection(projected_weights, + sorted_indices, + key_greater_than_values, 0.5) + projected_weights_max_min = _min_projection(projected_weights_max_min, + sorted_indices, + key_less_than_values, 1) + projected_weights_max_min = tf.stack(projected_weights_max_min) + + # Take the average of the two results to avoid sliding to one direction. + projected_weights = (projected_weights_min_max + + projected_weights_max_min) / 2 + return projected_weights diff --git a/tensorflow_lattice/python/utils_test.py b/tensorflow_lattice/python/utils_test.py new file mode 100644 index 0000000..15efa3e --- /dev/null +++ b/tensorflow_lattice/python/utils_test.py @@ -0,0 +1,51 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for Tensorflow Lattice utility functions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import numpy as np +import tensorflow as tf +from tensorflow_lattice.python import utils + + +class UtilsTest(parameterized.TestCase, tf.test.TestCase): + + def _ResetAllBackends(self): + tf.compat.v1.reset_default_graph() + + @parameterized.parameters( + ([3., 4.], [(0, 1)], [3., 4.]), + ([4., 3.], [(0, 1)], [3.5, 3.5]), + ([1., 0.], [(0, 1)], [0.5, 0.5]), + ([-1., 0.], [(1, 0)], [-0.5, -0.5]), + ([4., 3., 2., 1., 0.], [(0, 1), (1, 2), (2, 3), (3, 4)], + [2., 2., 2., 2., 2.])) + def testApproximatelyProjectCategoricalPartialMonotonicities( + self, weights, monotonicities, expected_projected_weights): + self._ResetAllBackends() + weights = tf.Variable(weights) + projected_weights = ( + utils.approximately_project_categorical_partial_monotonicities( + weights, monotonicities)) + self.evaluate(tf.compat.v1.global_variables_initializer()) + self.assertAllClose(self.evaluate(projected_weights), + np.array(expected_projected_weights)) + +if __name__ == '__main__': + tf.test.main() diff --git a/tensorflow_lattice/python/visualization.py b/tensorflow_lattice/python/visualization.py new file mode 100644 index 0000000..7cd8580 --- /dev/null +++ b/tensorflow_lattice/python/visualization.py @@ -0,0 +1,524 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools to analyse and plot TFL models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import os +import tempfile + +from . import model_info +import matplotlib.pyplot as plt +# Needed for pyplot 3d projections. +from mpl_toolkits.mplot3d import Axes3D as _ # pylint: disable=unused-import +import numpy as np + + +def draw_model_graph(model_graph, calibrator_dpi=30): + """Draws the model graph. + + This function requires IPython and graphviz packages. + + ``` + model_graph = estimators.get_model_graph(saved_model_path) + visualization.draw_model_graph(model_graph) + ``` + + Args: + model_graph: a `model_info.ModelInfo` objects to plot. + calibrator_dpi: The DPI for calibrator plots inside the graph nodes. 
+ """ + import graphviz # pylint: disable=g-import-not-at-top + import IPython.display # pylint: disable=g-import-not-at-top + + dot = graphviz.Digraph(format='png', engine='dot') + dot.graph_attr['ranksep'] = '0.75' + + # Check if we need split nodes for shared calibration + model_has_shared_calibration = False + for node in model_graph.nodes: + model_has_shared_calibration |= ( + (isinstance(node, model_info.PWLCalibrationNode) or + isinstance(node, model_info.CategoricalCalibrationNode)) and + (len(_output_nodes(model_graph, node)) > 1)) + + split_nodes = {} + for node in model_graph.nodes: + node_id = _node_id(node) + if (isinstance(node, model_info.PWLCalibrationNode) or + isinstance(node, model_info.CategoricalCalibrationNode)): + # Add node for calibrator with calibrator plot inside. + fig = plot_calibrator_nodes([node]) + filename = os.path.join(tempfile.tempdir, 'i{}.png'.format(node_id)) + plt.savefig(filename, dpi=calibrator_dpi) + plt.close(fig) + dot.node(node_id, '', image=filename, shape='box') + + # Add input feature node. + node_is_feature_calibration = isinstance(node.input_node, + model_info.InputFeatureNode) + if node_is_feature_calibration: + input_node_id = node_id + 'input' + dot.node(input_node_id, node.input_node.name) + dot.edge(input_node_id + ':s', node_id + ':n') + + # Add split node for shared calibration. + if model_has_shared_calibration: + split_node_id = node_id + 'calibrated' + split_node_name = 'calibrated {}'.format(node.input_node.name) + dot.node(split_node_id, split_node_name) + dot.edge(node_id + ':s', split_node_id + ':n') + split_nodes[node_id] = (split_node_id, split_node_name) + + elif not isinstance(node, model_info.InputFeatureNode): + dot.node(node_id, _node_name(node), shape='box', margin='0.3') + + if node is model_graph.output_node: + output_node_id = node_id + 'output' + dot.node(output_node_id, 'output') + dot.edge(node_id + ':s', output_node_id) + + for node in model_graph.nodes: + node_id = _node_id(node) + for input_node in _input_nodes(node): + if isinstance(input_node, model_info.InputFeatureNode): + continue + input_node_id = _node_id(input_node) + if input_node_id in split_nodes: + split_node_id, split_node_name = split_nodes[input_node_id] + input_node_id = split_node_id + node_id + dot.node(input_node_id, split_node_name) + + dot.edge(input_node_id + ':s', node_id) # + ':n') + + filename = os.path.join(tempfile.tempdir, 'dot') + dot.render(filename) + IPython.display.display(IPython.display.Image('{}.png'.format(filename))) + + +def plot_calibrator_nodes(nodes, + plot_submodel_calibration=True, + font_size=12, + axis_label_font_size=14, + figsize=None): + """Plots feature calibrator(s) extracted from a TFL canned estimator. + + Args: + nodes: List of calibrator nodes to be plotted. + plot_submodel_calibration: If submodel calibrators should be included in the + output plot, when more than one calibration node is provided. These are + individual calibration layers for each lattice in a lattice ensemble + constructed from `configs.CalibratedLatticeEnsembleConfig`. + font_size: Font size for values and labels on the plot. + axis_label_font_size: Font size for axis labels. + figsize: The figsize parameter passed to `pyplot.figure()`. + + Returns: + Pyplot figure object containing the visualisation. 
+ """ + + with plt.style.context('seaborn-whitegrid'): + plt.rc('font', size=font_size) + plt.rc('axes', titlesize=font_size) + plt.rc('xtick', labelsize=font_size) + plt.rc('ytick', labelsize=font_size) + plt.rc('legend', fontsize=font_size) + plt.rc('axes', labelsize=axis_label_font_size) + fig = plt.figure(figsize=figsize) + axes = fig.add_subplot(1, 1, 1) + if isinstance(nodes[0], model_info.PWLCalibrationNode): + _plot_pwl_calibrator(nodes, axes, plot_submodel_calibration) + elif isinstance(nodes[0], model_info.CategoricalCalibrationNode): + _plot_categorical_calibrator(nodes, axes, plot_submodel_calibration) + else: + raise ValueError('Unknown calibrator type: {}'.format(nodes[0])) + plt.tight_layout() + + return fig + + +def plot_feature_calibrator(model_graph, + feature_name, + plot_submodel_calibration=True, + font_size=12, + axis_label_font_size=14, + figsize=None): + """Plots feature calibrator(s) extracted from a TFL canned estimator. + + ``` + model_graph = estimators.get_model_graph(saved_model_path) + visualization.plot_feature_calibrator(model_graph, "feature_name") + ``` + + Args: + model_graph: `model_info.ModelGraph` object that includes model nodes. + feature_name: Name of the feature to plot the calibrator for. + plot_submodel_calibration: If submodel calibrators should be included in the + output plot, when more than one calibration node is provided. These are + individual calibration layers for each lattice in a lattice ensemble + constructed from `configs.CalibratedLatticeEnsembleConfig`. + font_size: Font size for values and labels on the plot. + axis_label_font_size: Font size for axis labels. + figsize: The figsize parameter passed to `pyplot.figure()`. + + Returns: + Pyplot figure object containing the visualisation. + """ + + input_feature_node = [ + input_feature_node + for input_feature_node in _input_feature_nodes(model_graph) + if input_feature_node.name == feature_name + ] + if not input_feature_node: + raise ValueError( + 'Feature "{}" not found in the model_graph.'.format(feature_name)) + + input_feature_node = input_feature_node[0] + calibrator_nodes = _output_nodes(model_graph, input_feature_node) + return plot_calibrator_nodes(calibrator_nodes, plot_submodel_calibration, + font_size, axis_label_font_size, figsize) + + +def plot_all_calibrators(model_graph, num_cols=4, **kwargs): + """Plots all feature calibrator(s) extracted from a TFL canned estimator. + + The generated plots are arranged in a grid. + This function requires IPython and colabtools packages. + + ``` + model_graph = estimators.get_model_graph(saved_model_path) + visualization.plot_all_calibrators(model_graph) + ``` + + Args: + model_graph: a `model_info.ModelGraph` objects to plot. + num_cols: Number of columns in the grid view. + **kwargs: args passed to `analysis.plot_calibrators`. + """ + import google.colab.widgets # pylint: disable=g-import-not-at-top + import IPython.display # pylint: disable=g-import-not-at-top + + feature_infos = _input_feature_nodes(model_graph) + feature_names = sorted([feature_info.name for feature_info in feature_infos]) + + output_calibrator_node = ( + model_graph.output_node if isinstance( + model_graph.output_node, model_info.PWLCalibrationNode) else None) + + num_feature_calibrators = len(feature_names) + num_output_calibrators = 1 if output_calibrator_node else 0 + + # Calibrator plots are organized in a grid. We first plot all the feature + # calibrators, followed by any existing output calibrator. 
+  num_rows = int(
+      math.ceil(
+          float(num_feature_calibrators + num_output_calibrators) / num_cols))
+  for index, _ in enumerate(
+      google.colab.widgets.Grid(
+          num_rows, num_cols, style='border-top: 0; border-bottom: 0;')):
+    if index >= num_feature_calibrators + num_output_calibrators:
+      continue  # Empty cells
+
+    if index < num_feature_calibrators:
+      feature_name = feature_names[index]
+      tb = google.colab.widgets.TabBar(
+          ['Calibrator for "{}"'.format(feature_name), 'Large Plot'])
+    else:
+      feature_name = 'output'
+      tb = google.colab.widgets.TabBar(['Output calibration', 'Large Plot'])
+
+    with tb.output_to(0, select=True):
+      if index < len(feature_names):
+        plot_feature_calibrator(model_graph, feature_name, **kwargs)
+      else:
+        plot_calibrator_nodes([output_calibrator_node])
+      filename = os.path.join(tempfile.tempdir, '{}.png'.format(feature_name))
+      # Save a larger temporary copy to be shown in a second tab.
+      plt.savefig(filename, dpi=200)
+      plt.show()
+    with tb.output_to(1, select=False):
+      IPython.display.display(IPython.display.Image(filename))
+
+
+def _input_feature_nodes(model_graph):
+  return [
+      node for node in model_graph.nodes
+      if isinstance(node, model_info.InputFeatureNode)
+  ]
+
+
+def _node_id(node):
+  return str(id(node))
+
+
+def _node_name(node):
+  if isinstance(node, model_info.LinearNode):
+    return 'Linear'
+  if isinstance(node, model_info.LatticeNode):
+    return 'Lattice'
+  if isinstance(node, model_info.MeanNode):
+    return 'Average'
+  return str(type(node))
+
+
+def _contains(nodes, node):
+  return any(other_node is node for other_node in nodes)
+
+
+def _input_nodes(node):
+  if hasattr(node, 'input_nodes'):
+    return node.input_nodes
+  if hasattr(node, 'input_node'):
+    return [node.input_node]
+  return []
+
+
+def _output_nodes(model_graph, node):
+  return [
+      other_node for other_node in model_graph.nodes
+      if _contains(_input_nodes(other_node), node)
+  ]
+
+
+_MISSING_NAME = 'missing'
+_CALIBRATOR_COLOR = 'tab:blue'
+_MISSING_COLOR = 'tab:orange'
+
+
+def _plot_categorical_calibrator(categorical_calibrator_nodes, axes,
+                                 plot_submodel_calibration):
+  """Plots a categorical calibrator.
+
+  Creates a categorical calibration plot combining the passed-in calibration
+  nodes. You can choose to also show the individual calibrator nodes in the
+  plot.
+
+  Args:
+    categorical_calibrator_nodes: a list of
+      `model_info.CategoricalCalibrationNode` objects in a model graph. If
+      more than one node is provided, they must be for the same input feature.
+    axes: Pyplot axes object.
+    plot_submodel_calibration: If submodel calibrators should be included in
+      the output plot, when more than one calibration node is provided. These
+      are individual calibration layers for each lattice in a lattice ensemble
+      constructed from `configs.CalibratedLatticeEnsembleConfig`.
+  """
+  feature_info = categorical_calibrator_nodes[0].input_node
+  assert feature_info.is_categorical
+
+  # Adding missing category to input values.
+  # Note that there might be more than one out-of-vocabulary value
+  # (i.e. (num_oov_buckets + (default_value is not None)) > 1), in which case
+  # we name all of them missing.
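+  # For example, a vocabulary of ['a', 'b'] with one OOV bucket yields input
+  # values ['a', 'b', 'missing'] after the loop below.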
+  input_values = list(feature_info.vocabulary_list)
+  while len(input_values) < len(categorical_calibrator_nodes[0].output_values):
+    input_values.append(_MISSING_NAME)
+
+  submodels_output_values = [
+      node.output_values for node in categorical_calibrator_nodes
+  ]
+  mean_output_values = np.mean(submodels_output_values, axis=0)
+
+  # Submodel categorical outputs are plotted in grouped form inside the
+  # average calibration bar.
+  bar_width = 0.8
+  sub_width = bar_width / len(submodels_output_values)
+
+  # Bar colors for each category.
+  color = [
+      _MISSING_COLOR if v == _MISSING_NAME else _CALIBRATOR_COLOR
+      for v in input_values
+  ]
+
+  # Plot submodel calibrations fitting inside the average calibration bar.
+  x = np.arange(len(input_values))
+  if plot_submodel_calibration:
+    for sub_index, output_values in enumerate(submodels_output_values):
+      plt.bar(
+          x - bar_width / 2 + sub_width / 2 + sub_index * sub_width,
+          output_values,
+          width=sub_width,
+          alpha=0.1,
+          color=color,
+          linewidth=0.5)
+
+  # Plot average category output.
+  plt.bar(
+      x,
+      mean_output_values,
+      color=color,
+      linewidth=2,
+      alpha=0.2,
+      width=bar_width)
+  plt.bar(
+      x,
+      mean_output_values,
+      fill=False,
+      edgecolor=color,
+      linewidth=3,
+      width=bar_width)
+
+  # Set axes labels and tick values.
+  plt.xlabel(feature_info.name)
+  plt.ylabel('calibrated {}'.format(feature_info.name))
+  axes.set_xticks(x)
+  axes.set_xticklabels(input_values)
+  axes.yaxis.grid(True, linewidth=0.25)
+  axes.xaxis.grid(False)
+
+
+def _plot_pwl_calibrator(pwl_calibrator_nodes, axes, plot_submodel_calibration):
+  """Plots a PWL calibrator.
+
+  Creates a PWL plot combining the passed-in calibration nodes. You can choose
+  to also show the individual calibrator nodes in the plot.
+
+  Args:
+    pwl_calibrator_nodes: a list of `model_info.PWLCalibrationNode` objects in
+      a model graph. If more than one node is provided, they must be for the
+      same input feature.
+    axes: Pyplot axes object.
+    plot_submodel_calibration: If submodel calibrators should be included in
+      the output plot, when more than one calibration node is provided. These
+      are individual calibration layers for each lattice in a lattice ensemble
+      constructed from `configs.CalibratedLatticeEnsembleConfig`.
+  """
+
+  pwl_calibrator_node = pwl_calibrator_nodes[0]
+  if isinstance(pwl_calibrator_node.input_node, model_info.InputFeatureNode):
+    assert not pwl_calibrator_node.input_node.is_categorical
+    input_name = pwl_calibrator_node.input_node.name
+    output_name = 'calibrated {}'.format(input_name)
+  else:
+    # Output PWL calibration.
+    input_name = 'input'
+    output_name = 'output'
+
+  # Average output_keypoints and (any) default_output across all the nodes.
+  mean_output_keypoints = np.mean(
+      [
+          pwl_calibrator_node.output_keypoints
+          for pwl_calibrator_node in pwl_calibrator_nodes
+      ],
+      axis=0,
+  )
+  if pwl_calibrator_node.default_output is not None:
+    mean_default_output = np.mean([
+        pwl_calibrator_node.default_output
+        for pwl_calibrator_node in pwl_calibrator_nodes
+    ])
+  else:
+    mean_default_output = None
+
+  if plot_submodel_calibration:
+    for pwl_calibrator_node in pwl_calibrator_nodes:
+      plt.plot(
+          pwl_calibrator_node.input_keypoints,
+          pwl_calibrator_node.output_keypoints,
+          '--',
+          linewidth=0.25,
+          color=_CALIBRATOR_COLOR)
+      if pwl_calibrator_node.default_output is not None:
+        plt.plot(
+            pwl_calibrator_node.input_keypoints,
+            [pwl_calibrator_node.default_output] *
+            len(pwl_calibrator_node.input_keypoints),
+            '--',
+            color=_MISSING_COLOR,
+            linewidth=0.25)
+
+  plt.plot(
+      pwl_calibrator_node.input_keypoints,
+      mean_output_keypoints,
+      _CALIBRATOR_COLOR,
+      linewidth=3,
+      label='calibrated')
+  if mean_default_output is not None:
+    plt.plot(
+        pwl_calibrator_node.input_keypoints,
+        [mean_default_output] * len(pwl_calibrator_node.input_keypoints),
+        color=_MISSING_COLOR,
+        linewidth=3,
+        label=_MISSING_NAME)
+
+  plt.xlabel(input_name)
+  plt.ylabel(output_name)
+  axes.yaxis.grid(True, linewidth=0.25)
+  axes.xaxis.grid(True, linewidth=0.25)
+  axes.legend()
+
+
+def plot_outputs(inputs, outputs_map, file_path=None, figsize=(20, 20)):
+  """Visualises several outputs for the same set of inputs.
+
+  This is a generic plotting helper not tied to any layer. It can visualize
+  either:
+  - 2-d graphs: 1-d input, 1-d output.
+  - 3-d surfaces: 2-d input, 1-d output.
+
+  Args:
+    inputs: one of:
+      - ordered list of 1-d points
+      - tuple of exactly 2 elements which represent the X and Y coordinates of
+        a 2-d mesh grid for pyplot 3-d surface visualization. See
+        `test_utils.two_dim_mesh_grid` for more details.
+    outputs_map: dictionary {name: outputs} where "outputs" is a list of 1-d
+      points which correspond to "inputs". "name" is an arbitrary string used
+      as the legend.
+    file_path: if set, the visualisation will be saved as a png at the
+      specified location.
+    figsize: The figsize parameter passed to `pyplot.figure()`.
+
+  Raises:
+    ValueError: if configured to visualise more than 4 3-d plots.
+
+  Returns:
+    Pyplot object containing the visualisation.
+  """
+  plt.clf()
+  legend = []
+
+  if isinstance(inputs, tuple):
+    figure = plt.figure(figsize=figsize)
+    axes = figure.gca(projection='3d')
+    # 4 colors is enough because no one would ever think of drawing 5 or more
+    # 3-d surfaces on the same graph due to them looking like a fabulous mess
+    # anyway.
+    colors = ['dodgerblue', 'forestgreen', 'saddlebrown', 'lightsalmon']
+    if len(outputs_map) > 4:
+      raise ValueError('Cannot visualize more than 4 3-d plots.')
+
+    x_inputs, y_inputs = inputs
+    for i, (name, outputs) in enumerate(outputs_map.items()):
+      legend.append(name)
+      z_outputs = np.reshape(
+          np.asarray(outputs), newshape=(len(x_inputs), len(x_inputs[0])))
+
+      axes.plot_wireframe(x_inputs, y_inputs, z_outputs, color=colors[i])
+  else:
+    for name, outputs in sorted(outputs_map.items()):
+      legend.append(name)
+      plt.plot(inputs, outputs)
+
+  plt.ylabel('y')
+  plt.xlabel('x')
+
+  plt.legend(legend)
+  if file_path:
+    plt.savefig(file_path)
+  return plt
diff --git a/tensorflow_lattice/tensorflow_lattice.bzl b/tensorflow_lattice/tensorflow_lattice.bzl
deleted file mode 100644
index ae608c0..0000000
--- a/tensorflow_lattice/tensorflow_lattice.bzl
+++ /dev/null
@@ -1,42 +0,0 @@
-# -*- Python -*-
-
-# Copyright 2017 The TensorFlow Lattice Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Bazel macros for TensorFlow Lattice."""
-
-def _add_tf_search_path(prefix, levels_to_root):
-    root = "%s/%s" % (prefix, "/".join([".."] * (levels_to_root + 1)))
-    tf_root = "%s/external/org_tensorflow/tensorflow" % root
-    return "-rpath,%s" % tf_root
-
-def rpath_linkopts(name):
-    """Add proper rpath_linkopts to the build rule.
-
-    This function adds tensorflow root to rpath for Darwin builds.
-
-    Args:
-      name: Name of the target.
-
-    Returns:
-      rpath linker options.
-    """
-    levels_to_root = native.package_name().count("/") + name.count("/")
-    return select({
-        "@org_tensorflow//tensorflow:macos": [
-            "-Wl,%s" % (_add_tf_search_path("@loader_path", levels_to_root),),
-        ],
-        "//conditions:default": [
-        ],
-    })

zKWfH|d;av|1B`%x^&>$3WwNTmXXxU(+oqIY)GmbuS0M6e80d3*0$!8^yqOY?LkSLR zrqy;tKDXbuioP~r>yCM@+jB@n2jC?b4w?*}XcFVT0ma%Nx zE!(#BfBJm?zjL;oopsiEdfqSG_jO;;)*b_Bnx8>*e4Y&xDpM191=A%Z+pUCKCtP;t z-^5l{lfA`F8!fe+&?C7qF}%O(dF83s!2f%Pxo96zB#)A@)c>amzHt!938VLxYkvk4 zJPgx;^msp710`JjlgG&xRgS1?1Kg8_O&Iw%HPaNm%rt%3I^nq@tA+UXxV`8Pr<$*! z!vkDr@+IwM^t94G`Uc(C;248tzh+PMjqks6XSCMV6r%)UJngx;!{Udr@)$mU`<$%m zo#Ui(h{bindAJ75D$Lpq{5^dNN7}w511Ax!>3SIbKP~{P5l4ovMy6b!Dj-k=N>l}- zP-4+vKwBx3!eH`{&8O{o_7~8W<*kE!9oh;yu?ZyfMPw)ct_W)F2{tM&c2Z!9P_hZLl}g%*`Tv)hNaa_-H?)FHWmFWWrYFLW4DZ~FfFYD`HJ2^AGU z19MivKOy@a{(SCHWdKCAAUL58YU}IEqr(B1_+R{q!&T(PC=L$_F)iZpB|w7n#xB=e zo(6Mb;B?OyQVEOyD7RwZ_J@r_QLLCWijO7@Rmo$DPii^ilXbr`@RiAgMctoOT;zcN zleb|>`M)76TNogK=y=?QgF?jl)Z{WX>}JD<8n9SoazVW_E86!6M&0A%^5lo4q!-(d zSTNa9WxAw;Mf196B!HDT%~I=G|ja zIMS2|1Sv65OLf-l>1Nx#8IoCYgY;+rd~tVxCPqHh3LVL<)w>rTS%XxO^*ZC;o!`1RTD=_%x}n7W+S+dD{#3vsluyHL0}ZS5=losc6BFm$T0ypmb1@DdI{YqU zay8jQ(=#w)-RZSKo<~Yr8xm`vAU2yzqTa8CTMY7jliDz)B$n-Om0~(sBtT(%#mPa$ zkPTPG&RNBTiZMS80|wsy|NpUpi*)$)>E>kBr%h^SW=4ts`H* z?dP|`_EX(Z1)P&F9B2V+@Jmwq9Ky|7q*&Y?Uw*5suxDZ%CW1bh;EXnyyMfu?4qX@g zn06qLIkg0)IKKQWsOACe5ZiyG` zhPB!uw>AfFTHQ*Pcsx673UMHv7L;yPV@)q0sGW{9$1(mp4_t3)k#NNK7Tm0cKyxe? z8DpZHv(t>oZ}cys!q5LPXkGSkMP${-T5rGonsNQvivsSS4QYO0?^4K32#e+tj9sAZJLZ;&q)-%Oo>g|Y3`H3@)_g|d)5n<_Gk<^AqXOBM4tm6)K zU`#cZ!C6()`&$LDSql(qRrEit^IZOrV|zeXuke}UG;$> zxFrZ3hS3P0B3F*fflH_1meO62gjAn4MI*qVQ6sw zU9W6P#AfA*OziXVEcb)4mw~(DRSUx-m+oT2FgRKs2FXgQ2q6DGpUP}!`x~h$5OvHT zETqkPP4DsbSm+>GWa*EVc{)oNG(l^Khj`Ojk}Hyz=PQ zom@RWZ@d^EAQ}qnCR=-l`taDn0po+Vx6u_+-#K}lq~$bNWKZy4=#=%S%)szZR(3v& zKXBqsV=@qybKMC-0D7JEW!*M6Wt z}BM`hj#{B?Q%^KJWhFH3;Nhc|@ zKqN~d7~C|ciDtzeR@n~_({j3&&KoP8W}1sA?(XJwCkwrK1_#Sn>dEs`uhF9HM62Hc zasvPVdy#;QLKjm^RQ_W1FNYGE%>E#gZ#TutSV0rE#=heg40%}Xx-Zta^1}+f-voE5 zzA?6VJm61xPW!u>QXtONs)!nrAw;AqTS0i4M?KM^ao2nhO{$lr)R2_qM1X+En{&J6BY+>iDwU|ny z(Wv+R@LM+vn35s6+TSFI6|Tza++mPzA0++2?*ie)unmPO^qD2F=2(AbIVPlB;!x#Y zlQO$dw&+C@%SDK;pmgk=c=kM}@pz`J<)-Lsh$1s-nsBW`O{c4kc}^$eygKj%zkl~| z>btdFA;VQBmyaQ;f}j(xPVe4(g=@|I-^-bEciu$N0pir3EWw&h=gyc8G-eRa0d4M@ zQ#(B*Du~N7pATWptXf=>?pWL8KbFHiUG!l1u1Bm` z@N7Tleiyge3uBCyO1S&pjB|e8>t<``z}#I@PHu0UbED6SBggXLXS;2Q48I#Q!H6EBMrvfw_TSg=t(GK_F?(x3Njv8C1^zeJBlfz`*#N^niyC3 zau%-=@ODZu4DfAt{|!Llad|F24k8-=z&SI6izHGAsACJ1v*f4z`)DpYnADK$%dkGl z?)sH@&dDwR*O?8X#0Xon&TA60_sA14#KdO*35*ve1Q z;0!VTB%wr|bDVEhQX)i>oQFl`m4T@<>%xvjdFWMrsREe1Vw=SCKe&2v`uA2E zzeyY6V0fGzpRa8QJh|ItH?)1THf5%O`4kb*kaobWmMvSnzdn6BseEF!KxXy4lk3l9 z_Ut^@JG19hLXA#qT-=9Uq!kuCzj1-dd^8Zums!_37cB-UeD5d02Zr|u@8`YM)hhdb zoe_tBV^4`uC#tLm@2=*k;GV_n6Rtd8s66(;QP#b~9{o^Vg}6EaJ4zwJ&E+G_}}FZU-{966rHTJ5>};pC4;kTu%}%arq)z?qB4&j@p%!5l-IZ3WB=2A8JjLuLwkXJHQc{W~K!@eZ8Xj3QIqQZA|in2(jhO}Jw$jw@auo#e5 z4I=X0l%M4SL>kRn@w#?w3(OrAJGOV|Xt&1_zrbmL#qF%M!f$6nC^_nffUnY}u9%3b zEM&AtLVVRULOnE^SEx<;jV@Bqmsazuk1rP<9u5HomCb=^WsQtA@1K_v%H*@f!coXT)SgSf^@73jcWh|T*Jj}9 zqGR=Ai-F@@uWNlQn~hF;K%zfB<@`i>R!8lR|G249PY^6{R)&14-T=fjy1{zIkO_}a z(Xn;S3qXm2f`aT8bX?~JZIhOBtKs);rtCbu{ilq;2SDDF9X$*GP14Du2;Tjr4C>m8 z>j1etl0+P6yI|UX^RtlwGwAzXi{nJ8>nU^VHyXQdKBZ&6$1@$FMm(a-T=Idt_7815 zE5z<#GrH|p>SkGUaaPD0$&)p1Vykxs6Lzk%NOu?7Z$i31g+ZiYz7iSh>2|;sLi%3qU{+L1q_Vz3wbPk9K`V2U%Yv>4)+8m z6klz7zroh!bYEuQbOAmW&nHz2Lk$Wgxw)H(H+8kO>Mc%3kjHXo4Mu|qbQo}eynH8` z=bVU?G_d~{Cadqemv}Uul({(-K95VGa*4dLv2pInUmn&f`f7r;(6xwylKf(y_z{brm%;(pzB9e2tTnmyeDpRShpq%>f??77gSducfzVX;9zW^K>57csp6}Re&t|FV z>G}3vfme08?s=h;yFO?0@^Ddg%}qlSVbB{g-Rf!!7_W(Rygyq4X-1L@l{&Bp2$gxd zy5FGcvicIUoytEDeyP$Q9Hiyc?5qSjx$ny1m{UYncsQ8s5Ou7+Lw9;{X4P)|o4VfT zG}q>YGF-rx%i#Dd0YCjcKJ_L5t1>n|-oe<%-k!P6Y_izr?O7Y>CTUV?9421rP<{=o 
zK}?z>B_^iJ`k>zuwb4}Btk5A|Aqy#Mkx@2jQxDaK+QO-tmfdH z#uKC0u_B&8Fx)2&l{pNadJw}joofLdvhK+vcSG*|Qcv2a>u9S`Fg*nDX)Uc@N*pdX zg#hVoNy3Efay>O7|Kld)sp1(sux^2?5%MwM4wuu$c&XN;T&Fd8Zth2XLPGOa2re)= z#s`QZrYHX@*jIOMWU<1n79$b(;x!&SoyQ(`@guI&FX#P=iP(gXIkzG#TdwwY37^h= zXQ^nVH#go?;{Pp+!w6+?SYrWY#?C;L(Q1>uY$}s~U!N$*zbd^+Lf_Z!&ob56xDaRS z5p#E6Qk&s7p$=zEUJiDpHDisISAmVINmsr#m_!kGwb))YD{8t#j?PLeOP9U2B45^T zeMF7~+*Yx{4Udp0Sn&1OcS$XjVp+J5*Jd|*`@;qbqGqhSO5(whmZ z{#Tl68~&JTLZDrI;~|uLi^j!$OGxzqojdJ9CB4gV+U!2qp>~q&At8mSo;y{Mv$C>^ zviUG>TSp~xE+-=)2>>8j?f+K#{aHa%Q|jla1K0p|n_ZyC^JNk#3_4A~fg&j@OUA;2 z0_Y?F5&W^J-53F~NNB^F#pOIVf~oQBWxAqMXk!$J>gPXl8kKQ%aKD% zp8W{96pWCx^p{U*FCd=ZkCPzS0t#F*U{`TD?jxa~2m?FmM7HDNc;89dCk>pilh)!N z0s%fO^4-smjT9pboci#B{0{%VNwC&BNS>J(hJusJQPZdaWNwVvw7(gOREx3=PsjKD$T{ErR>GRp%SJWWB%6;ML1CWkcw;>N$OR|nKXU6u4q~-K@irZN}m=s zH-?Ye#ziu(n>!sc6Dy5PPaip5sP>*`8j=gXtTG0i7iI#Qr?SujnUHzWkIfc@U=uSJ)gSv{p>@X`fp0_<5W=s0 z8YNfXf&Kq8rg39xcmhcMMvKVLL}j08lE1EYen0TK-#fsgvW)t^wIm}A6!F88lLALx zbx-i2q|_xznVDmNJBIkhdOIvUJkanISUA7wW{7KNAoMj_1U7I*c2uP6b6=+hh^t$H z*;lL(``C`5Q;%QP{kz*yX|+fP00XcMpBH*?XoOv0P+H=6FmZdnhMRB%AL>mr$czI? zuO*|j+?`{o|85(<%YcOTgR`@$=oV7M)%44~386218~1C&SHUTESg`VI$?B)eX9C9E z6xif*r1I4>nH4)a9by08iRT}*?;JwLOc0r;9l41T97KiCM$fIO*QUhUX&~n3@R3gf z+m#Q9h24Z3VLK6U&I-gKuQ}a2FANJK4F+pBAacSXNE=T!g~VY?mRl)u@eZ7BIjY7~LwcU}a03djB=z$Hi42v`+_EkB1_|yS_qI>&`cs z!itG?%8da7O51lW)Qpa?4Og!JOkW(wHwjX;O?C;EM;okLhp9uROvjHF3z2H^O^XYCD3#6qLV#Dfi)vx%@s+P{tBqZOYS&1Qoqwsh~@qM&cY7fVe;yl73) z@bxe_DeB^!sFwGp&xBT9`6r_9SGz8lx)6-@<1PCAtI$NkM-CI>d5>v5cUIQb z9O%Z!U5bl~ng64#4b+gy^cIUmc)$AHj5AsWyU+h_T%Il}j;$SuZW6Iq`Mm~!waE)f ziW2QdfW*!Lk1{&MU!PE+H9PednYf>fyi|@pjys5KRrljKMpQa{Wv`HmC+aJ%E9!b1 zAH89qeM(ZbWv}Kv-bgPRp481Q23r`+fD;*)UXioo%wn|xk59O@qy=8oU+D?>@P(aj z#l+!z>y^bEUB5vY}dP9WtPg>k%e~lQdLoLSAP|@*Rv6wJ)_f*8D(^{;3M?xp7 zQCk(EcZQw02HL8{6i2VF#oVWevu3`{`!8exVS-F>_|hY>{bDd>=)zt{F2*htX#0U{ zggqokrby@7LA1E=E`?0~jcCgMoC=wCz8*)hg@EL`J235j1m)QwgN3sN`!B$r1M+kn z7*d-xwYl?4K51%VcEtBZ@Dy^tCkV3(C1H8kavqT8B8k9(cR-UlK}T2%AZ1ECNhvbA zZF<{I0j7R^6zprH0Az;lOksZNoMdPIfuKtF9(**}IT2&A^a%a6=%_)xq2qx>S#`VrCsK=VMrzv%X-2h`T)#+?Qc{hnn$fCbOL_ z8eyRP2+i0yHZDP7FhiVWYiaOkl@|(?+a$R8RJ$%D*s*i}?5J)rAx@%mVgIIzHp1=f z=-_EHB}yWlqoFCB(5yqK(^OIAd5dn`p`QJuy7zVG4^X3GVw#_Yg{B)ws0IAvq|U#BxkU!`~>^~jUS!3eU^k!dvw znw!-#EUnIkZ-9mk>AMgp`wY)sPlD1laK$1aA$29wYo!8L$!A74uwH*Uy>r#BKxQ!p zCexGQa&%`Bl7){$rp)w`$(Jy`_2`XTpE>mI*B5{qC3ItccWy4D@U>H=YotXDINg?< zYU+a(L$Gs?w#1PW9EF;q(xxo;#`(Z8_D}e3M!JK_neL}9rh!;vp`I5!5ijf%Gzd^BBaQ4{1`^FG_m_O-^S_x1c(Q!; z^H*`)FzY`l#oF;?{WdMDIN2dwy4(ptV%T zqV)W^f`S!55W=EMk94z||5`45`u_m*9Z6|v5}>9nxH?tf6M4&% zAsO4Cs?ma^&nNCvsO8hCrW{RC&1WeY#1!R2tubI~$|74Oyq4yJSiwU}MBB23g{q3~ zuwq6-4@ewh;Pd_Z(61pW=Rg7jvL{O3bd4qPQs3h4$?C81Vbq{Bk38yYqx?XQ zAjy>Kwkut6bw4Kjptq<+f2!8jZZ@9T5n;mjJnSTO{CcnVdi05!IF}Xj`u4cu^%PzA z%e(s={|F_t_4qxhm3)P@Yb#khndm#A7|?jNkh6-gyltSQpr(^`TR;3}1QS!7(9VLz z(IQQ^zd%K8x|{gA%5{mcZ{xCg|1RE82Y2Dan?9P}Z*aBGHFjRJ# z6#gZDL#MX}7SlDxJK+r1?6T_(O7ry=nSS}Boh@b=3mDk(LiHqk=~$&vQe!)l`>2 z+zNpYf~`F@WBU!eqd;01aSQE68yl9TISSuP-}jC^&W>sQjP=OMEYGPrZ{AQ=p~OPl zol%Rb+UrR$s7Ut&vvUH;!W`Qs`KoSL7L69HtFd3@A9rxsUM<8GW*vgUy}Djc8}g@` z+<}yYZpq|n1PG_pThDQfmqT}NVTg71=4Xs;fFDF#ry(a?^2L^*DsogWNwu2hLpU^n znT6^6fw*(kFZxl_o1R^TAsf4|RdP7}xud8|mPkwWl#Ltzf|raAzBidbdvcuT*6Yax zSo$V^0Y1K;cdL$*HFjG)2@@%lrG5K<`%d5gj|*_+_IThpiHwUo`jvClud~zk+fJcW zf*p5OBmtjh(HxgVcXt54_vXt5S?hf_W{#9NzvGeisWqTi1cJ5VVq*mX+y@}vI33Ld z1NayU4vxpb85Y~&l%&!=S`d^ZFi!Eo5So4Fo=y2u`)l$owJ$`tU2wybXK}MI$&&Vk z&jwzTB}iOQp9u|v2=P?J=Ls7neD!ZHW(9jjTuXFpSGN;y=X=P%KVS3ZB&O(wdR0i_ zIe3NMVtXQMjFYM)37)5=y?P?q#6XacslpMG6I&rGa2~~l8Q0F?piWR&ei2pusCDUN 
z3=U?<7#J845?O5#>et^s>N(_1`02}zkAR{Y#F)w5q2ygk)ShuOe{#13q1L&Ef z^Ff;8cx0z92=dn*bCbIKxbX4iUxNd@!|22Ii*owaz@*|L@DlDailB1gjvyQV3MqwL z)PUKKo-7&{y1w6#FLDEN4GiHfl*sv`{TC*wefAqO?4i;Y(y^%n(b2v zbzP%jy*dm`I9Z1jiBO*ZuB2qgmZG?TGrrt0v{9Gno9bEmM zbFfLqy^-XpR8HbRk^@8_Up=wfR~%94S#uDdSA9HM1$FBAui4VG&Xhb075e`&_s|mh zHb+-bPF-&`)B~4tCQ=@XY^&};PT%g7P3aUwX03w6f{^g>MYN5%zKeh0UYOnDtIbl8 zfaGC^OaBxL^~nN_4c>Fp=>S)EUwa(dDQK_Urc3sEBJeGSp5BXCcREK3ZJy!bY~J`@ zdA=N>nx#2gXaP?5dCH>g1!w@-dIE2^sDLFc5gQv?-I8pl&zl=Sz4`nCW9KVNQBe{5 z+PI8rN%mAj_yxm9y(XnJfta2A(Dk zcy4d6NtQB&BHwdx*sSbK=o*&+rKn>w?JSMp_f?(D*Ur&|a zQ>JT;Mdu$D(FU_WL?If2cLUO3+NTEE8h(2WYCQ@?@1zbcFrU|L0k--D+kO?kZ9*rR!(DYvNBCkZ!fMUfQR{mcWb zR5lu}&O(^>0y)r*R1FP{YUZ-Eh$DdM)v68Wr^Bo;43Keh{q{56#dvw^sKmp&)?!e; z>b13Zi-?uAZiK_1^ycRJT7KyPxK{}{$M+nr0H3Itatmc)Vc|b?sOaeZ&I>w&0GEDv ze0&c8Rr|fD$#>hs>JJYGdsM!1#hRMVjB056p5&#U&tP-eZGxEArLDJn&Q@q~R<_@3 zx}2{L*P4u`vYH`$x`zU53mBnkzk z_7})ONk%62h4ifBEoRVPx)GPDU!D;LWTaynY1KfCcE9x%5{eq*izf5rHeH&Nt{c%D zy0;&meE8AhcI}B`!H0RogVo9VF z?%VzIQzX>t;kPevRFb1mT^okob^*d7;vYl-r|%uF9Vqu^m|!KaIIZ}*d*}VIspYD4 zP`y{Z(_b-20?!~cykFA(vSL!=rS$%gkRU0=!{bl6QapE{g#O|rNHi4<$DTpw&-Qcu z8BY@+MKo9AAwb9Q-7msAzGe^CC+tD-CpbrmlOlxf6yC^4OUtj@>v#Eq4$37c#pmS2 zOq=flY+F8?j)fC)z&#njXaPHD=%kfLyk>0a^{xz(lEh@cFQ-_Xcr|ucB zR+smQMdM?85Ab1*oH@%x8xp>;!DC5zpQCy9>GtYdO7-`$3y1fb5@Mq2 zV$UL~ks`_9jX!A+3`~AV8u{dp3Em8nnl&7lw9_RBCc4^H50(Oux#*6tS%nT0YaBMqr>)wc7 zFN}cRMoRs$-}jZ501=nNN*_o80eD@Ap}j&e1bl9Qx9Jh^Z8$P2MqnOUO;~!^&vL~g zjpuEuHHf&Ij=d^oUPiaLxfNgzF^W6FLxa#2F#ox|iPu^XB*H*4tUCP3^jh(^%L<|l`QHW4!T9HhIfaVaU_*EKG=~g9e^XFW9>aXO z-*+OHYNQ|FB9-|wX~HmTRD&Yu53I_qF6OV<2Rda%lN2VxB!oY|c_?2n7P_!$oGpR{ z`dO{grqprq#UwuJN2)M%@g*j=*tdfAG@sWhmlT6c!Hd)R2!Ti@*Zy1}2ywCvD3x~R z3NNdNsG8+WG?|KR7%VFOEiWDzJVf;M+G(FrZ?wwNHGZmu+*G8f9y8nFpATo0)Ct}y z=PPw(*vqdZdj>`RvlAoW0GxZMgoM7!d%*b`2=z6;x;pmb^*|sfB!tE3 zaEdgCaqsrj#m$_J_nGK)+%HW}^4wkBu7~+n+!USW%lz+Bl@01tIRFC`a zpS-&N7`1YW<#8*{nKmVm0kwy4=@F;J+1)VSVLu|At20gbE^kRVK%C?n(vHPFfJJ_v}tY_3j#*V(rk9zo+ZRBY7 z0asaew(jiOZ}cx;zJx_a8Uvzg^Yj6L_3(JNp!ey;4{U6J_d8XpOc@^^f9b}{zNRDg zBxBQdCEntC;j_F46j=fvudKjQ`mCJ&{8(G14h{|m0qU(ica|3r(gNfZ03US3yhnVd z{D{3xli1E#ICtwq_p0sYai`4jLZ@5706?i&wI9THI$ewdsD=7a43Of1eILfIjjS`oJ4k>fxV>zm%E=6Bg~n)ttvXD-{93G~D#?esy;IA0wD}Yg zGOllZx(++bPdj%<|JHnloWC<2Iicg!pSv}7)#}kLy$=%p82XG6HB}b#qB$U+3bTI^ZRzGwk;#>8Lvx3AQfmyeqOEf6tQRZ>goQ!c{NMQ^#a-slx?Hh@ zKAIzKQ5LV4-IiX9W$2_YY!&LGz5W6iS1-wOXtU{xj2jO15ZMqhn7n?wASbTpB@*#* zkdvK51KFfl^QDx%mB(YWvne&6@4jy5y#%jjV=58>tNj|S>h%s^U*Mc7eNHV}`UnXu zj*1GK63Zh|p(i0Eb}l`{srO8>rA@BM2pXZymadXO=s}U4aJ?w0S;cF0LxWjMSpQH3 z;=bEM>!d@}Ss!V?bDns*pbuP65~QI1EX$@XZgxCSmEoDEXJx%fyh)+g8o1o*W&CdY zvvKZ+UXNtKw4{;}3cv-}YdNVj0;ZvKdL5ai$|Wq;%WS|M;RoE+b!}S_4sqP`sPIW= z>+Ko9_nT?=bk%II3U|R!{F9qYWCN%Pfg6C(>k?E++NSgE^h@r%2`DI&Aseq9WBCe< zz+~d;{Zv2Ca+5?1!R_I6fp~*8tB4g=rcF}?Q~H$Wy_j)P?}RmsTE9mAaFF>LhJ1?e zioT%JsV2MmY9ZJ1My1f4t=;q8WL*E@%YP&Pk^MI+jTVSwiEb4Uh?=jAGYJsDd;r$@ z*lBi$j?}pq#b@#XGB7J!#p`3m^!#`R%{QYvh^&H{2sECaJ_%lHXQ{ zv+s8pgp7+Jd6NZg&#QMqIG|1=zgX4V?f8yL#X!p4KcE}l^jJ}jAqy;Q9PVvQ?z%NBKHxnn7_mnD`L8<*!E7$0@k_l(co{O=8~)8 zghjC?B7dyf@YW0kVmnbqr`eotQ_&04pg~1DV%&eMLML?tnR!6J+%@IHcQi+a@@rbU zEPH|JwF5me>lxjGC1vB_BPEOHA2O~DspW_Kb@|t(hn$C*Do3lo?WF<$t-ml)#m7lU z;F<>y-`eN2t@nmjLBPhHdMou_;daM~+y2EN66D9zn;(AlGkoz34E9%cPce22QNcRr~tt^?Z8-RDn|(jmRh{ zJ=TqDA)|ZSfDtpucmJI_rb`^5Uz}ts4ZT&raxajiWWk+<+$xRRC3jU8gjBghH1xVA zDmt)Td||%TJv4^#7OVd>|3<>Cs)Dcd$=9X|1z*qJ#KrqpM0U$9icQhpCv#{ z{*uaerNaL@X?2f3CJeS`;)&%uh?M+h4N^eRoXtqlIW|V97O%WZI~1St#)GK+!TGGm z6XbgvW42uAdo_Bkli2DkXm*i9c;m~7x0@}Yr{7nN!3Hc3Y5{M#Ys;9+*8{D`?Y9!Z 
z56cqkjs>9z^3wOAbF3F}O>`v{M3B;RR(KtUCm;s%jI?eH^M~xlBf^6A9d617Z*s?U znfq4o!?)Dr=qeu><_}y))X;;5_*;sXPKDKl{PKLdtP@BzJecZ}; zZCT`T_ty%wCTB@T!&lm<6by>xTf(tBU#+oqM>?e*%)OdV`@GQ3oICA%0MD%R0jn zgz9PclgO{0=cc9#I1>#Y!TIj^yKZlXE~9SQ?Y30IQKVBrU5ahtB@BcE<+t!(XrS(_ zUY?ypDj$Q&^m4{4vX54ucT0>5@&6o(auas2WV#dT_I_aScUgLeurTrkMn-6UYu%UM zYly1Dx*zdPv(vFWMUKLHewBfs6us_AJhGCdC$s$EsJ$0yPo+&|+Jg+aKM_rBe_o%d zYP4+SMw9VnEF84h?B%j{<%xu1v6)Xvz2IYF4g#_Rd2T%}M}*KoNH~nxw6tNM zex3ld)Zbq!_-R)Ig9&;lcl-hEr zh4ymxf42GQH^VR7TCO5)5VwR7cLnouCSr>3=`E;i_rFXYXRGWl!qlQOOqg(LH0ar~ zuE~wozr7bOn6nr#V@o!lLbVM}U_jBSW>g(!bqmX$=<>e3JDlUSCam$yTxnLQ2liSZ z;fu7#5|v{ZiE%>-sn-$k-dq`q6EsTsPaF0YI_G_Ky~Lcv6k9C5dB^aizX_Y)G-^G% z^s1aL)ns{Nn=FET4T2az!d>vw_r?9pf09sxynm*5-|e++-n#?Swu$PBB?1Gp>X2Ub zGp&!0T@AI@qceM4&$3!e2&rDaM2sYAF`z>x234mYAWa&scprB&*}912>CWR^0wNG!g>Z~|gw}MooMw?voa-^Z5pc)^CqcpHePyr2x@T&AC{+A@-i?ALb+aE1v0|MX zIiE4a$b5z8z^(^krukdClmKu6IRL1QgFuN4v_IF=5`=N_@d5Kc`;A#jNpz*Nw(dMM zfaom1l=YJkvv_vjIj8&mJJKZY4LlI)0EC(WnN8T-PUh@)4fiZKc^gRq_i|*^XGBl8 z7QY!iOzW_1znVNC+h7__GAyxe$&C1GWbhP!kZH4UFKr9N!<3R^^}TDVeRfd zVBt%CPZt7sA;q!$lsZ=R(pwGSx3+NTd)($oaf6L^f{cTq1yUn*7AD8N7ho>VdN0UI zc@dBOYws3wFTFcRPak3!pG1=18!_g7_FTgIW%$*avlB znoGbqpiwXdO$aoy4e#ik{qUQmw=L2u|LuO*dj>1h!s_xeO*4=+vVY zTT~3|1=T^|Tk!(|=jC{T{lo37QcRVS8XJoQWTtz~JOSzSt$yvLYtQ#*WlG*u(!ozR z2cOOe8-S|&C*pB5U|1M4*c$|k8mlx@qChxidUK>!UJxn%Q?9Ft;z&u!2C)whDX6pw zY0Y@x(8YfRQVGS`oH4onEYbBSxL&>VdNKK2jFzFtb{Ov!pIm~%({S7F?@6Rx;ud>` z1;#?IIEwUsN@85LTF}~0l0_Syw=ZT=VDd4cuiGrrBUwSv&(p<<@IpTGo%Eli(_3yG zibuzs&9X`7kag{Lg-x0WCD+p28UR@Z02}vBf9RhrxXuBcAHlwb>WU2FKnw86Km>4s zDZqr}r>y`JQ!J2r@fl*v>U{j;_4V~|wYlyK9_>NY$_{x|VdR^J`(xv&XthvoKL?rB zfoZufok)|8d5ipE0HIwn`i}fj7$+43Jd|B4ld+khMl-~0F}uWaqABaSsj2)OpG39$?h{+Yr0tsdjM35R*y$76HQXh072BC`{>2atZA1?%1`s<7Xf_udkLWUnoWhS~ zEEqgFauuItOntcq$l2^xZ2t2hcFjz-kEpZDNnu7Szadw3v)Z&p3}nr z7Vew#ii-eV`}58rD~xSBRnYkzdU&~s1rDA(=2CFKF=i5y{@PNf0SJ=*F9}O zrw9BRDXMVc<1G2_uUt!jUrB#|ze@3^H!k2P2;dXndwO~pBn5zH?eAx{m?5)izf(PL zyOIACoG2;HKrX!(nLkT#S1Ca75vrb}FP^cl75oZSTNq}bG4WMp;OaM79HYPy4?-f? 
zjOjP$O}9~pxvhrbwTCrN2R%Rszhsl{YjVW((evKc8qgP9enZk_Oym(2up|e?h&)|#I!FxHSrRV1cHYl!1iqX$xG!tPfvdoK1p(Zz5(34SsbGOsvrT_!$1>@g z&g7kcmYl?A<|}cWD;OF5go2U$Lnvfu{7uN^Bpf^&otnIDE3Rn=)!Sd6nCW~O!LNSt zlFc%z#2!`KnrAC23{0*!)&)dS6-<~YN_+!mN7HWp{OWi&DY=G`7O`YS1dx0mF4r?} z`M=ZXmw!GufD45P7z`6$ZhUl4R7hOd1DEIzO7@vgIfNnuQq{s}i}ML^EK>eZRaM=) z+uAH2+$jw|$3rF{m{_sx90ySA&-4A0v<+C2W5FL!KlZ-9ZuU0Q#Fm_JBcz0ixI>^MStUEt{M!##$Zgx056c&ByTb(`k~7t|=H9kh{^BaET>YZ!RNjUvz^2EaSC;R}PrrQ$nvzE0On6^d2J6jf1;hzb91&+dT% z$}Zr{2GwB3+x_v8eN?YZ?|AwO`~leM{oL3%CJOqTSMk1_*0tTv$jD~%rU3_fY+_ z$Uti1b`nv2)zvyw+O}&MqGu?oF3$x1+cJA}k%4G6jt=mi9gvyRAlje5Xf`IfwIShX zdUpH*hB{5ZfxCxlMCx>-GpD|u4e$;N)KN)R`r|)rybX-y_)P2`7IS9h1g2kS<#=59 zyG{VDD`2+|$&F$*sLupw5_*hc_yz#>f!j$%BNm?r8#6OA`x<*u$Wcq*mP4H4_p$C( zFzHN&fHRJKkwBPQEoN1TYGoL`>}_dd`Y#xPRr>n&X$h;_O4kI-9!l@&MW2==Jfc-f|(j zQm1tW5Mvk`0y)Bf!#sABI57IqnO*yq2RO4A7Ps0-10pw|y14|Rm~Mc9B%oD!{4l%d z=*^^=<&XBwah#zXzU_f;Cl!k&!Kl_B3~Bn+PJHp_w|;-e@v=RnDyEv6n#MvyWm%<( zb<@2rlBiJnoPmb|%kZ%Kx_~fps(+qb@oLV=h*-R%gTsNbq=S#UgMe%fKYnMY^qqd;Pw7zPqN`_y+reSBIq!KR2T&7~fY?VB#YHBqY(5WI9OKj-mp%d9ya2*JZa`yDm^n zjR7CyBPmX z!)A`T1Naf&xc8??857WAw7~vL^34L7LI8f@WQ()@okT?J4TToYB1PU%KM z>FySgkPhiay1PR{It2kqLFq=i;n6AGT_W8L|9$>`jLyi6%6&IB&hDOr72Qo!TAam) zP0r2Owo-bHZJfL6n2ZUf`v!DK7X*aUYD`_*4lg2ze$?^BPW8ilAc=Vq?LWOA8^`IT zXk%kjDPyjvt4mV8*{RlgG0cC+0zBRpL%|RoEp0ms3cz${XJ?^gH^=KeKP@NepaO!V zzE_6O>ZU(DsT2#otZTjKULp*-0L$zvj<1IJP@b1*J+CMg1;H;xr48ZG3Q5>YoM2I_Y}g(96(;YTyz)CA+o|2@r~rov6WDtC5qd z%gFAxq)>`DsQ-$xvlKV@QQ_h{V2)GVowD%N`#~Tix_VB#v8jbHN<3#`zTh%=-t@4i zi2J3j<$E+0<1FkxuH12xpW|c%sp+yag~)R02nJJ)b2ot*Rbvp`C(*2ZCiLgepzz_> zLW_A6$hRLqjK?=qQV8kj=rpP=alqi^#^z>RVxk0?cSQUo%I|F(gyA&>l6$ChV(a7m zVLg}-t6K}F@9pomJb$`#U|)|db|ZF(GBhHy0zu~xlcWhK=qt6Jo~@A+w@KdYGQn1x zSaIO{osuphUr3f@QM0@4Wu|$|^Tovwi5|06ns9D&^L~~j^Qj`Bw`|PtxzL2(MYUp$cHp9}0QEOi@C^tgg6V||!?7Rl zpu>(rJ~u4D;R*qRZ=A&Nw$W+EFysYV$X8A)T-(}I8K(Lli|{?VHy!WrI$$o_iyniq zUa9OE{J2hv!b{63du#N`)lUSUv?rzE?JJK9-N@X6U!tr70-v+ID5F)0Vt( z9c;Hq>0^=uy0oqgH}6jW$Y4Z>P(6S86-EPLRCdn7d_h8ke5>`GBBq%)bF68!uQYea zq9E%Sln^^WF#cIrqESghqZhEtv7!WmVm+GBl>xN83l8WeDoX&xIyA@+ zFTK~iBEqD3k>QI3HB)c1S*441^U=h6k!}bUsy{OE?(k5t;6iD~T1Yd}ziX->*slDU zZ{L6CstO)o_I+@71xt~i<~xR&xPy9aKUFiO8dKOVL*Zsmmo|Sn8yAAzi2hJZyz6US zsZ;Jda(-pa-akg-wM!m6))5VbY*TniN-=?FY4RJn9ia`&K&vjQW6A+zVa+z#C{lb&ryp8&3tT@{_p|kO}1h!_;9C-v!Pm zRGxs@0GyV+r7JcKwe6@H; zDJ&>eI6_IjEniuk`!mHsx{XQv*h%ASN0S zW!a)==+^jw=(^Otr|;DXWA2D=*xDLhLv%}2>k=zp1?8VzjfZu-yIX~r;|t~@`#drT zKzMbpp~frCv2S6E*i;KcNS@$lY%TeZBAi=zaHbhJ*56^qf-%)GT2XklK zvn#Y*e^~oHQo|YJ=kn6o?3$M>oZSQ7pPG z=W0M`_3NjZ0vPV5`X2ttd=(JQg26sY%rNLkB?i!X{GA=~Syye$*q9DY@Y#}w`cHSn zrkn7sCS>i0-wXLB6%ADZm2((1Q@|U)hh(AWC;ro&_3ERQtt!v__T{Q*D3yhJ*0I84eUgQn@F1QH9Sv=n zM5<6QYH=5wWujka_4HydJ=uelss(ITot&DQ5c-mER1c|paCo8CTI<}@XA`%RhqcCD zM%@ld^o9Pj)>cUy7g*KmbDBke?#%jlqB76evG6%Y%d0=Bc30P<^@I4+N&b}4o299z z?pwq2*8>A&in*)5i71}%JgX<>dN5-9skf5LPr3rR-c@{C|B5XNO)D;LHq4<_@Wtse z^YL-O7ezC6#|aw8CcQCsuT{=<2G4yZ+lEyx=@7lnL$h?rOa^qT7QZk2Wlf*mvRR|~ z*n^2ZtA&6VSy5R_!{{cOWaEupvpTT-aj$;LB8*x-4-^>|ywm~a1F9JUL89dQEGOd@J`+e{c8{Z!Zw-@rhb}FnzcSi$!|Ji+S8sjyki08o zbcVQHVc%ytRdUbH?Bf8daxt)8CG*gh|6P8g--7$JJ&DQsq5Y8QXw+}{-QD{6c$n`k zk6+y(VvClEl1fcRYlh!Tv&ceYcUQh!6u~ryp4Pt9Og~x8)h&%Vmh>N6&ZZM}>;#Y9 z@OO0vBqmzxQ!7U~Y#Y7f1eK4z#k$SOF?hwDyp|N|3K^m)eh~E-ReJY>5(W4D5K@8b z6-{EUVrHgqR!PgOf?Wf%c&!K}`L$UygDfZn=X60BQJ}V*29yRt;sh-j2DHD#qL!C+ z)qEG1SjuBmyaiEMO%lE8iBCDDu3otGjTv~S>ywa=BT1VdHlczQSMILvp(8(bFLblD z{fm|!HKyp3?}wK`7)C@{v0H(coTsfJ?^oRLXq%Q2Z%LVg%v0x7&G&`|A;`q8C)YfF zkAy*f)xXk3edUp^Kge$cO*DG^*w7xU^kQ58I?cPiVjJdiLp-cceYx3dVU=IYDKo7K 
zd6?aBMu{C~{p66&cRHEmQ~>EZVkg_GZcP`9M&>b~5o|nP3m7<9J>A?4+GWfYzQh^P zi;e^0sT8t^e3l9X;Rj-P^S9vTlad=sgA9}c5wwave(~HO>QvaLFfTxG# z=aWKL2T3Fl*)c9`Ch5yF;I0Erjt&+SdEt9!fs6}or$NL#e1X#hIYaWn28q*o_Lr}6 zHc6cM4V!*e?-_oo-Rtd*MsE&C5Ss- z@6&PE(me*_t$(y*2qB)ai7=`6L2F|Xeq&*7-4xvZFqqLy&H~pa(z?!MuqM2`*{Hvi zC1iShnyRnF>A|z(Rg_h(OapoIGm$_gSM&^S7~hMgxzXii?C965kTfZQmH0p)%Y{DgLG%d}X=q4eLa^j{p0@h!_SwzXOuWZ;L-M{sERJkLRxlrKtTJi2Z6s#vO9yF% zk~WOf@)Uy(0<5O}SXg-EVB+->W03j0FK3vnSBm;(OsJk0X8bF zriiMIRr|TDf(2&>vtRFIxl_ai?1y+GTaUCz-Hp-8{Perys7br+q7NASo`8*+W2l`U z0*y}H6+NT_pu1%lnnUX9>Q)X|x>Dauj22K%Oheew#WV{dH=wE+T7{c3tJh|2&!dH5 zZ_wkExuD zbGu<6$InVKY8@h%sgTcm@ouMks?OLw{rkO1?B=TCrqoxL{2L; zOs-0vRc9(3hJ)0Xi-Rm|N3G1wD^K zgzx;JjbgJVoJCD&;mDFB3po;w@2LLbg~4+5!*}Mo|zqsk20g)GYdrrp)gC1W$6OLZZ)r(W zu*kMG>6(lVG_*CSB854l;D?Fct(`B6w6A3Sz{P%YROZ$~ZP_(C_gYPV2=CE1aa-v6 zvmNjyoKj~W$vM1nGHvrE&07y?Wf8G&I^Xod>yshhVYTh4$RUkONUn>yN&Ji4Zy zf0Uwvrj1*MJ)D;d*%b^lzfrbz3he8tSaC(E)cGTjIUR~J&If!uqI*QGyON(e@g34{u>ueZgO$=e@YAQZBsCl;p&3Rg-!Y~c#9Ng{}6_*uL z+Rkf?^7-EEsoFk_C{rvwlaTfM6sMUw?dF;0o(_j}wK7aQ=zZfNbi3w5+IXr}TED8N zhCTIb@k#vS@ie^t$|Co&fp}CDTbl`0M(2!~llyP|veP;COQ#v$Rx=B6?2kzY9{zcm z_dzZ=)iZAd2y@LR38%_p;+^*1G+gd}EaG)-h8~@0-J&g^{CEfD)tyw=9|ASj5EIQl zCaW^1Mw~o3MdzX58(;lpPxY+IEckFoA9#~OQ?R|FGQ&3n6`z`x~@u1a>Yn#lA<<=Sb2(o>6 zGO^E$5vDO?F68A1_p_mgfd9q-?tJc~WvOXbptQIyx{1YiXcjJ)5_G;O(pQ!_Oa^4ekT5QB5< z^6xRYapEyp!O-i!OTaOque&LV7jkiQN=sOKsbbrDN0G=Rn7J--31QOg zml=5sTx+4MyQmVIEtBzd+V=%7HM{HR+Izw8@m@BSD~kW1E)b-^(V+OfW?!`*BT)}) zzup}C1@$``%ugF(9q8Xz)o6J?UQPejYISY?%N-#|?I@~>#6D@*AV%Ol23TFu>acmA zo@ad-(1=TV$5WYbUNM};eVnb&r~^@u@q11EDufKLgyQrEqU^&Ymy9}tsd%Oj!GzVR zyKa}kYaJFOg;~)h_iHa}lp8euG~}4CZ3NWO9kniorNqN2p074rP<%BiF$WxDKdZJq zm3&A%mb~1;Qg}fP%eO+}jc-Q69_-__ZR5T+#{01ME`cv#P`?Vv27izJMqrqf8zn|{ zFjPs>LCzFUl+#eUANQmm`81TC+F#^D{cbm6n!ACYvPm+OdD@kZnK=ejiQ+(ICLS;b zIdtKuvw-0~E)wFr9Qt8CeFE7^@7|mfXMX4zAT5V-a{2K0T(H zbm8nY!gbiB>LR21{RPYek-xn)#fNM5JYOp;DqRB!A*kq)u7C=*8R(72XX})NTIiup z4#N$R@HqH)0NgX@-3T==%Wp526KI!pbad()7$6tSIc{^u8kcc(h3DHau|tS_igIVM zWBlW$7BSM#MNEh3a;wc9vJV>ab1e)S1b*b1#mA5Pqs?mmAFU!v&g}zoj8J~GWA`-Z zE^=B;Q8hAv9}TtmQgnz>mzeCcu+X(+zXSgZ=rLW(O|{J~EDQ(TWy-3mU-l~h|93Gc zEap_b!kZbN|Bk|2c*pP-`fvEVK>s%&odEb&JCr{gD4e93VBvJ2OIt>(-($PNujm2h`hHk|vkf zH_mUZTSrc%Xe{JbGIjQUMoKJU&Y{B=p?$Tw9c_IjY}mElxz1&q7Z@*rLPb59xzPbW z_-2Fz=cH=v=Vzp4*-^fO;3qm_5+3H7cUdlZdvYl>t4g=t_po`(u1yME>^li4vSo%% zFLlj4SiCD$hVFvgWUZZkhTnq+lye^xh6bV3Fg?*E^TzgfHv=iVrT%M1JlEVQlra`w zF%{%BQ=P>;U$R(=r0F#D-b+}>k<^qNHxNJf(Id`1F3OAz8{()5&c~s))YMQTo}YSa z#9HhwFx!O)SbojsQ_5ODdf81A3*!1Z@fmN|#DFf^x4RTs$F?WNms3($FoIsS={?n)sBb^unsB-lGZYw@yn_W89*S68;u__O%epI1e#FEFaT}Bz zmib?&P1K!h4jt#Lbh{lvyC3Rw(Hwt{Nugim7ow`X{tAo-N0^0Z>%-CfxIx>S-{Zow zIAH%ER2R6j&K^vzUI()TqyyBXY9PD3Gea2?{aVo`=Ufx7E`A-9avvqopM>e2&F7DL&Ew>1hm65~NcxSXQWtq{2O|K`Qd|A3~!kS3sK{ITx;+h_tGp|Sp(}ha3ID>@)*&O?>wu_-5;wvD6lm7YJ?qb> zwBvOX&4Wrf!7c-2(d4)RQ@YT~ftT1g=jPqHhdnPbesCns5~;Qo#VEc>H94K7=#>r= zpK0!AoU?VOMi$grFX(P!*rfmaRQxM|*L~m4FTx@v4Pf*j5GZZcGi|qdXI)Z&C<(Y9 z5;CW-RWy{M#yR?A6y$=sa>K|Cw_PnT_XP??Uxd?Jm9KGm6g)j)3jmJ+G2;KG;ss*V zCk!KW;2;?^D}w-JzHZ+uQ#7MyV)823;&mfRMQV2pnF0;=S}>7qTa67<;~rZ;yFul^ z+y`rI6E_E5IuW%Uj)Mb@a8R5e3B00vC1>Do*Eks72gH2WR zi9AWca5+DJE|Kn!_n2N z)K^+)sI`kqX#@e_o{@ziV%MaLc}d?{UTn%*`?|9Mp+u#O%`pHxcr2Av@p515#Ef(N zPWURX#sb?q9Yu~90v1u@p>I_<@NJ|eR{v#Lk!a|Yh$tw!%)s1S9x6OO+{fz%V`P6# zbD7fANbLGKA_Qi$kg#6Mz68q#E5a=?+(dHsM0=muR({V36Z~;`*%8gQdg^Tn)HqoU z6hhAn6m>Xa|D0l;c^oyQ;UgskrO)S7=Pre-kc2Cegd14Nd4?yYaR?U(F{6zqyl?;) z4*tEGIcY|Y$78GgANRU#Ru2ap(JHimw{VXtZ@do-JI;=O3i2=!o`2Sz zV`4}7J&GJHgqD#4!z>8XLyCTjBL6UA!HMycfzA?A%v7|IwC31vDLOOv`u8bV)S5$I z?^^CJEv$(7iAl+VVpM4q 
ze*xzVpbNaKd*5NcT?U{-1+9Mr={PVGz3SQ+50)}8?EgJlt?9nOw4LJWNK7uqcjcIh z`E8YT>PZTpTH^a783*3YYVri-=S{W&EpJDRzJ4pQ;Ht^a&qsYx7_-KK!uJCn;MvFZ z={!FU8^B2^z3NJNY$?!ehC>=#b(`+H{eqEw9{b-)OpJ|>8kg^*%{ls^^sWjD+n|6T z@2c52xT=UUukW-%el!-rvTX~}_YD`z2Q6?bpp_5t90q2(yy9H6JP6hAbBrTTdwvog zSpv-k>{eTGB_*O~F9Bf8%=mG6x7!eZY*-Aqgn*Id5cH*|^l001!Nao1c{O|QP zO3{jYdh&rf6C;rHftI6z7To7RbRnMEoc+pS-PPFpRX@A7I|}zlm>~-@PW^A@R&v1p zZSKlwHkAeeYpH__e(UG)FDfIm?k~qe4^)Rv0ZcK{2^aPR;qovn^!aP+9}H z1+RAw6PwurjxKsAM_0ef^<| zFxnvXe-BmxU#G(ae5k)6zK-{C7j&G0*kdEEPN&39Luo`W91>#Da*y3|i>P;4fDiOsMbGb>0xws>#G{5J&!ceMuboo1-G0Wu^1H&bjO0k` zm`9&UcPM2HDGd!PW?aOEO4^31LH(c%h(1buukKa^bKv+eH8p+j&f_yPq)lJK#te{C zE`WfEV5Y{j-R|bzNI}zE|`3pZB#|T`l1cE431fFE# zPr!OcGmd2RD1*-PQ@oBwI}3Rsm&2fF!ca|IUW=zlY$^`;L(5Z;8k<7Mi$4@fl>xL@ z3#DrF-1V<@Mm~372I!%aumvOsM>2u(&=w{lWF6hSpAo-;jM%e|qH9@eSt$r(|LZ?r z<6nT8+%?KO92@ErXCMj6AiiNS>Bn|-a;iTa=GUyVC$F)gZmdclYtCEyff#nW zJQ8xbv@~#T6=j$&XK3b>fg)y@|286gh%;Z!gDpY&Y#y3U?;WHU7r=O)e_HRM^%jC6 zLgwlJ!k8IkP+kQT;W}=ME?3?{P|t^ij2y2!CC`eI?48qrjE&bWGxCNfZLbh|QxbQE zra8%&gR#koeQ8iWL*X;^SSkeR~&D zh>8ROGPxEr)PF<6(MtJrzGKjc3;IZTHxpDenmsv>L11gbIm^jS^t`Ej&CLB42VwHv zn^~QysnYG%BH21xINYPY?W3XXF}7M>jhZeP zWYX8+o|mao2y{*fCPp!`x?s@oHA($#b_(CGdC3Wv4P1VaqHAo%a7p4J=4SNhH(#|}*AFU@WGC3m@>#!tO3 zznXTouec6~94~FK5TD5$_RTd=JPD`V#15A3warbhJ|pV7^`RS2a!# zLOACGy2BU{5-9vTVbcjv{eq4FnSy8+RCf{K0zp>+KE!;sUGK2PzMYDO`O}NMY!3FNI8+i@?!s{#x1NTQz7AB8!xb=DK|UUa1Zf(TXrsJ<;gS;su*iD zF==~BOLLsQJ;@^{yZwYwGVsA~%=>ugz>XUK<0&}UYK#vD8dc2xOcX%1Mxl)k+IG!fz{G#;?-d?K5??n> z3Wni+wW<|m?ew~6=#iC`d6H#9ATvxMDzTqsI7k5#q~*9L>-^g88tfqbJ>oCzs%!*J zL*D{Tndvu@3?%TdoTFO4G&&ujR|cMWt`HNx_@mz-Aeg?pgYmvU2WzH85brVnwra;e zAdPi7UcmmgHr)>Uqtx@l55E$IWt;g99Sgv9%l_vZMh9ibe*kt{*{^J$Qf(W;0IHn< z&2u)kx50Cl;3W=UXfjqG<-++ia5YkRnXKM`k+<3@!A|p?*(45WiTTbb!i6&>nPlFS zh_iwZe4jRUgVd1H95&A-|BX*;fcq8l5x6)I)0hNP_IT&faXs&^~)coIprPqPoNDvW5!>1A~pCN}mHf(EYhk7N*T z?BkI?W}%Gfh%}jEJ(f}!I-9`uhl`k^xi>f7`~urpOYBf=sf@l#q7=UWz?W;r{Ci69 z7&)bT3KEFh>%}UAhMf# z_e8ACDH5NQsT&eTTR)ieWtbk2I}LvW?Q4smiV8E?zr>qR}h zQgOcv@(cJw(RZWr%dH^tqRlO^=W~19u%|)!ejL{w9H%kDfm0Y4d@t=kNJ|%FZnt3q z64)RNiat0rV$j$iAZlnxo}}Z52%1?jiEAlo<(c6(&iRdon%U3ORe#b1ws!Ppl1g3^7jqNnbppT2&-WaRt?4cf-Csmfu!gC>nEe`*eYL>gAr9#XY85-;?iL#(cnU``lY}5LR@i~ z-)_4-v-#e7jiOc-bkM9si56h+M{kh~T=p?yukdVbeK~=a`|}fILb6E^!Igyl^R$46Ws`A^AC7t-F}vS!@~oNjN-YsMkZg}<+wJ^0 zluDt0O(uA7~i6nV7|KaI({x@@YcciBVv5)t;5L z#9qm{)-jFlvEzP-;UEAnLe2D-LD#qT6Fur6bK(C$0TF0b2uEF~9fZ8S0I+BSoJwy* zL_|O;De>}&upe?4?%kmcW7e^}mgdhyYP@^oR`yS5EySY72m?jxwiVIJ;fAv?yhS5@ z&(j|yYB#xbBL_($3uN^)F1m3`Xsm5ceP$IH7#_bS+hmPy{`fzK*B&2TFAG`XB}Zn{d9~llYaVAGRyq@;?O%PT7~~F#HxBJpBE2=m|sM6 z)d5wzub_1bI>P~o9r~DTkkcFKa&^iaJaGa;s2WqI%-&5(XS+sb+tZ4#j=@d$+2=V( zUw)_|!nfN{M7tQ;>tW`!LYPk0X82hx_uh__M2DD((z3@M$5{fEIN;$(mqqTqJgH@- z$geEQx`q531kX)JjVqt*RtIUMD>t18sB;JI@V{*|1pRGLh2u&F+xku1GkH2+8&d-^6J;6M7FUWic2hfxf`4wsX`$k_mww4C+Kp1 zSPtNV-o=rfS3@{2hX~q2t;)ZrYRg>(mLIbGo})b8c;+sU|1_|DQ`GN7s3Ds;ktuk= zGQseTZ{E=E!K=9PtqUR!db~sTMU~8g_KN{{5-u7Q--I+~I;n-B@(028v z`M7yD>OO;guWlhd>v-n)YtzhL&F@6T@#ss!`hx>`kl1gWfj6|hgTc|dHK_imFi3pg zE1iB>sb%>k!^_}heG!c!LHA#gpRQvs#&5dh^KLcn*}WjDP?xb#7rH?>XhMZM`z6~T z%DyTaI8kM+4xk7S`BG&;Yh>hakzEh!w~aJ$p-poYHmN|G2fu7Ob+5A1_D|+g#8Sax zXy@+E+F|d9h>V^GRb<54Rf_r4`tkYmWLst3rzol|UFQ}DFYm{^R#+E-qVR9n)AH{x z%8TDxA!VX(I&wBCg}^933pYSB z9Lbd-fowgtZMvn?8iD;3Z1_?$c`}-9t@PgC9Hr{rf49~ibRAG(BOU(|LkF2Q?{=CK z!t-Zfk7Ismf!?|wED8Se&Z}IyX9%v{COl!C+#%(;Y%E{u(-}F2-*@B2ui8LXpC@7M z@$}b>)}$i@t{-8HTQ1o*KeLUIub3TS635qxch{) zx!P6i59!Kq6_y(eFR#|OS_Pv7E$bjaLa5>LwzvEZr9A&a+MNIG$TL6%-3>r=&`Ss4 zcPKi;Sh9Jqw@`^`s3|6*1KHIM>nu(m(^ApYmm939sT0DC!HkwX;uYX;9!jqHrQKD( 
z87J*qs3Co6VAdq)wYs+eYtl6;g9mZb&mNFA7@~WhWUGxAO2^d6ME4dpp!`tr_Z0i+ zxCZSE-I?Di@6Z@(2^HT)+m8Y4sOmLe8pFEL=W1ilO3}m7SjGjd2Q$HwIvEx?z^aU=Gondbx-0upC%&m$l71Eufm2P%bHP? zEpKf|J>Q0p3AVDcq)OAv^Vn00nzA3`nX%J=?D||rZH2&wopuNFO`ypW1=?=PKVPWu zygE(|ZXGOu7t(){so>9)&hZ^Rgi?a`f}nI~3CmjuF$QE4p*5oaLK?L;78Fnr2p!+q zD9FpmZfVH?T`qZD&wIf0|L(RLV#g^NrE*3#QFYW?$$%2##H>Djk|0i}saqTUN|pZF z`JQ`THW+W(0c>G^y9dP8vJu2!(v)B3t;=4k^-%A&Zq0utLuhzLJZXVlQ2L~wEQoCT zJ3alcWp6n1-XTd|;@=u5Q+ELmXJVmUFT38@5Esse`ZYf}Y>ww?wPWTvd^G3a@ZF1h zOCxZA&vSnlJgr$Tt+LR`gdZ5v_O0EZ2P-hwhX2@ZVd;X8A5C+%yg8cqmIU!rPV8j; zs*5#|0NU?l#AA$3N&}0~c>FBl7_nlc;=mf^bb5o;KpPM${s5{N3D}^Dl zLz%Qk7xN}seA|_i+|!J==EduZc(V@8^y&~5rqQs2Q=M z{%232EgUIV?ED$Bs&7!<$Nv}X)u537z6Edw}xCv<|Fr4JeGs% zuTHPGE2j}PS0$#emm5a#GXxQ8XQe9ZaY{IazcA*mog;6`)^PPFwiJIcNH~mhrq+vGs(2A(5iWuSh23CcfvZ$R9O$b4m+gW~R?iK6j z8dga%29A?5Zy_HA9JcQO7=2LUGO1@?l)0M9kW(zb6}yJ;&Hqg5oh< z+;&H=Ja5lJ))l0znQ;R(oKHRI8zyO=RY;ew8N}6D$MJAhP$E33Zrjf6C=_3L!)_of zE|Ql%^AT!~wW$7Ox>|@i*uJfq?87{P0MT21xZnnTpUEtS&q}+4Wx25QpCDcMGbgoy z2H)?|!5KcEwUi^KXkXbygdJjArY2%W&?TLY-jwnDfIRR-k@#6Nhb5ev>|ECU8Mpu3 zyyGcG;4aHuQxi7FTG>RvgG$|yOL*TU8Ma!)#C-*muYuicE0;aO`cXKPt~Xebq#+wc zw+>EK8KqzcHJayPF(>>6l`=4WxYLx#X}l!dow$a(>_gd7_}y?yzbe1?)zy7C+O^kK z1k(8Xv;Eq8mxKT`5*!jfE}K*$FIcORQ7xMnmR9_uPW7ydUTuq))THwlzB=}Iec@s~ z)!nk(Tfzn zJJeypqoJZLWm082&gZ~C?C1McrY)`mIBR#J9lPS{+@_Y#=t!*wBC|$t9wC(uAIp@( ziS1qs2I_|G;ODG5^4%iar!pnzqU(`02nGC^!)Av86tEb93#?m(}a6EXW@%q*$k&MeoZg&mbhp#7!Ia?7{|yFW#?; ztnxKOR%+eQc{~KtJcF~Xxb3|#IiZba+57*1-|R0+(qM?W1H_ynE-swFH*NrI+!VBv z;7d5Z{=$yZ{9X*(-Fwjbr`3I)2Id=mn3<^iiybWBJ|+)#V%`oGv?2f|f?guH|-F^E)3(PnN7K zC992qf*5V{evIVk?G5%}TS+6r4k0a-njJRw`5WkJ{}8FRFCqUa{8W60Sk4gVu%9GoxLs zLIx9pXzB=OY-hNOcwlYwy}EhGQ_HIGdo=@te#hrRefzIWDAOb~HoKDi+|Wxzjd=^BZ?(WKd*={1 zv}K^DSre%x(es(TmcWMLk;_McgkwJWMuyZ$bI0TeQ$$YQK6DO|_CYrl;0KWLC#tp-M6T9=SM-fgl!};}jYTIFm z?r;$42SmQqDJd>Ue1A~J2ZBUFjByr+6f{`~)j)iE`JTv+h5-Jh(7$WN_z#mZ#SaFl z>r)V2=DHm&Ex69xK-<$wE_KZ3vE`}LZvDc~I6|NOEL(88w8Q^4v9(`97l_vy;O6@g zuZ82vVlnZ1h<>Ko(abHJlMvgL=ckS(`|*tWa{cM8Mdm06Liu15bYt(@?>p&pQSptW zU7>01&KKM4HK(h@6NbNYT~JNH$H)Wr36|D_$9FQXE~540eI?wAo4bfuDR=5pfi5qN z9gXg@>ppk$zlWXu<@5imVcDo)?gWhQIReQ;==44)6Uu68YW|!awIl0NlB62;Yrt)} zvL1O}#Vdk8DH*>dW6f*}X;KZZ#iPM33A`zhK<`;AZvoqDTVHHex|IDzoXS*@ME;Cl z9xi5Q>*K;bPwPlQWbdroIF*MqEp}?)k+lLGwVUAQhzs8M$6aoJp`Ar;#-gZC`4N*g z4SOziyi|Tvivj5=hyH(WWhmF*Dk}&=xoALlCA3>BnuMRtu;Y1nczB9W=W)U=wk_U! z)wX3bD+a6S17Xt&as7=0UW_waH05g3afG#8Ev09#IKzI`S4kZAjl5niJ;R9Rb1Uc; zY`1CnjY$KZ^FlO*X^)0Zz4!PgY5OqB@SZ|m!ZXjjipS!xV??!^Ew^=C^S4ITp)Qxc zzbl997=8PusX<4HXtuehqDty`>oPnBkN$z5Oh^l5K>99W^18xh;5xLp`t?A(d?`0_ z8APmxJJ7|1R7md`8RL|3iY@|=O7G?7FXj^Jor4K5*!1_+%suIeR)6t#VI`^~8G_I@ z2X#G=ms2K-R`#bk@8LcUsm07 zW;n=-5n&Pz*0R=z)8lNiad1S|Mgl z9K6Oq@QtrHtgAhX7ywJ6s=X^EU13W@w;S|Y-PnHre?`^*y&@8PHe&>+R{{dzW`OV! 
zsvZiMaq~0}yvEMMAMFm8zV8+9dd0N*jhyb??tPtgnw@2j@B0!&75kjx8^w#b`2JCf za%Cp?k~|rklA?amU*CHJLVr=5V@EKW;mk8l)8YuOSkS5oYGu)4i+9Ljg}$cRV2hXC z@}0eG&fIo;n0fa)ewRv?E!JDl3U%*(Z~OY*cVsKm%;)Oh@UTa?qNyCM#kuH}{>SCg z5NpH$9Lbj9OTz(D`!p?ub;t9gZE-RCKGA`k2*z_(do-Ht8r-cMMj8IqzUh@N?m@#I zi_%lxJfGNPZYhq5<1JfvZ`U^BvkdSOH7lOAE*SOUT3yCj*gK$F&)Hn6NkCM}+HY~a zoKZWF?KCQ1$<;mNN^q^+~c(J`O{M}9b1b?1(e7vHIY3& zmhbH+S(}N-TUw&5zmD^+ca>wTQlqrAG`Zn zo`|d_pZ?fg9TTsfZVX;oHDg@%?XLV#j!OXpfR>Y_N9L7LeYi**s}kN6%oWl*@lfa& zG5n`)3h-ryFmhVA5`bI>Sh;3eeE2VyyqKGto2hMDw*4&XElt`a?W*<|h#%7M{G85M zQ~s>@L@W{%_S7Jq{Z6j<2kyiVcA-;R0ZSPLPhLxfPV> z*2OyVk6;wOSLoedm8#D~GBlPM$9vI>EPWGlEp&iI}mD%_f>oF2)D5f6d zZyh=0MXa|B34`s?gK&Gaw%hy_<>VdN;sNaJf7ze)DM zzEUv#EMwWo<76^y8(#&v_d}MJHrnO4yz3>U-y|hAEHlEU*O$`_J|C#<$)FqUD50VN zy*Y90zY0~0pllphCrPb?nfJ4q4p5IJos(;QH{u6j;V` z!@C-!ZU|w_OFsDNFn|4S5?@U2OSb2)B3rqeQO(M4x#xf4YqzIhcuAm^U24>zPjFQ^ zk;sHm#b!NuLd5;siEi^PVu>iVTpN2Qh7SV?wlj1p$H`PZ=H#2R*ASopYK)AAtk1 zpEG^d8fJ|04(#$5;?~}*xOm)`blr(rcIyR`%i4k`_L1a^ zPlmz*@fiP}nKM$9IQUq@24oLMA${AY-uJuu)L`;&N5RjtKVaSc*}<)3$W`WZ?^{EB zL2LSO2>j1am8&`X!QuM5eO`5HFDlAc-*S~ixNpUxo?2Rd;Eg6PhqKy$G0pA!4##dq z|0vi`7Q+^vcwue#V~oBZ;$-mU2Z#+uf&)_Ok%VusihH`^VJ%{?U%1MC&vV#*{1h$Q z4MAipoK9g~^m=6*!O_^&fO`8`Offp4iXw5Irvqk0MsZrIC%azV!ZJatduU>G{)Lgy zkFZfk!+{H~A1_4L@BaL%Wn}qbe;E@kdSe`FE<1?5Ixa#l;wjVs&BMfGf<7M$$G~+YZ{pCsq}&gS4+0RkgF32w&}K?mabh zx2nX&=O)QAnsHhw=Fs6mcSVmOb0bbx@{`&%hML;A56b2Y&=(ZU-d*l_ZutaS?-LkV z$I86wOgQJUwdB0(+gD`JXt`%GMn?&ViM@UO9V6zA?!+2jEF+;K{QmT`pmF|H&vJIQ znX%C>{KvOE2EMxp?4QNbxVfbzRZ#5MzE*@O+O~fAwbHtR_K3Y6W4H)MS3(An*bXEh z>uhj&ex9Vrc$fGqgDEnzt0C&WmaufK6D+eGLLl%)lgbP812ZlLhD1N&CG{m{*au~Z z<_LSnkE{`=4y!mO{kQQ`b{WVO5G(&$uvF2%{(~Gd4EkA!dsabrcN;#0>|vEmSzrG# zw}%UEY(JQJ2_7x6^)}>~v`6xf1#wJU-V&K2UIA=gnXC4LMLIh<&+wNOb^eur`gr|_ z>hg+Iv1h;Csb1wN?Qk@-icmlRRL9 z-*AQX=;!;fRt4Tu3s+mGtA~b|PG#DdfA!jI`)qCO25UV_Xf_thq`bQ819 zx9m=4i-HxtjSl($9?y=r=n7p_#Ketk*MnLYh%0nlCRKwVtF#2dxFKbNdy38ED_+9E z*}~Tx<1T2ZhF-y|J*{ls248b%9X9MI&<#dHSB1Hz=2VESf2>qyHO56tK+>E;Yb5Mn zpZ>6B%w71ze?h#|tMw#pj4RC%-8X;#7wqhlymA7pji*O&Eeb1~?CcL{zL!m|#mDXz zM0>w$fjK{blw8_NAhT$db97rClE65PV<3xBScV3@=8lHmkkJ1x{_9I(w9_8HyB=&L z*p`zoWh=KrVB&Qs;XN@-C)Sa0>-NfB?Z?B1U&IB!@MrKYItJzqy&ki|%E{Vf=287Q zbvT&6F~T0(m*jM{jGVrLL_fXi+O3|#4w=^((dtd4us*OcG5u+%)JR~bWVr8sMfAM- z@?c4#zeY6wkl#H}!Jx2O_qVLcld?zF07XKggPHlGbM;*$ZyOH3 zv!9cXZ7&455LHZ7n&BgPaqrD9d?4r*by|#rxhO>EhiCATm?a~e7GGVESJ}O`wj}@i z3VMzIZ;j%_Kqs|r!_MpeO*Xd$2l(i9`^_)vm=RsM#&_<|9`3x*;uJ{ddM@7t^182? 
zgp@QZ(vUr*DZ}c`!8HbxY*lNqMJeJ{7#LO=&j?!2RKj~oT9NMs-JX3TN1LUdr~mC`0xrp@Ngm5gmWr6n$W1t5GhZ$BiY*AL;&?UE059Hx3fsW#oO}lnLg>?H}(iRSMoi+n7dw8-5Rd9}1@5vew?g!+-{7d_ro1@2_7ke3t$(x65i+F@~3vZ4#V#tZRZwDVg8e_KZ z1SV9>{1!MOw3m1wv0Fi0XeV1U*6#=&(`YL=6RC#}RLD7(xa)p=_T@zyaGc(VjS`ds zjeRHmXf$fwfoGxwzAb%53_2|6TJdlrB#+isWW_#=22L^UbH}?9$cg&4{?``Cla5A@ zQP4NIG5F(`#^1-7Us@VH;=RS5ApHtzxI0!^pd>9>za|pOx_7;!YmRk%u$gqNuUmIf zP+8qCpP9?SH8m}+=GcU&p{d`^EzeMG2{kS{Uh`t0YnyW3zD-l|l#~b!(Pqu^4A|E; zG{>M&`aS1gjoY~If=?P`EXcv}1S8q?nIBFkA9+ffOT7Dc1+fAml|LN`ZiXf8D=@gb zuk*QlW4|2;lt1-nG<4`Y@5m*USvF^6sKqVxI<#YfS7;k1nfZ?_Cw$ zy~0zTzv$S-W?mThlRdEPn!Ydc(dfVeBb; z*_VWD$-ajWiL7H62@S^jN4D(E2sM!{AyGtwvSf{Dm?2rmzNWE_eeb&$-}lWqbLQMR zbMD;xKF@vL=l6Tx=l9fosT{s@*Zmt)78SO~*oP)lQiJ*jw@_1*PlXV@dI)5`+M3iO zXwd3Bu0olZDrhk>2xpgDjr2NVfx+HG%dABy%}na(pDITa>2jRCFJrhUP9TAA)V z4NPD-U5mNC)(v}-mLyU8=LPwX6%A8AhUV=^B4k9H+&UHI+b@|ObI<>9G>8qG;hxb) zn)#o@jD%JVWH@!IVu3E#QcmaarGI(_(=UFl^u~!uan?NRxra+guc6EMN3a&AE(HuA z*J6T@bBwyMG6c-B;So0+B`1X^?i7BdKD^LB@6p6%U7TIU-B`pEkrKLGB~^8$3zcw` zaoP0XF{#pcNo)EDTd!o-qx&zxY&xDx?G*J$yD$2^2I@T!KrNOxfEhL>qZvLswIv>D z%oR@_taZU?+rDMa+|oiBur`OVTIt`%gnP;!hNANns z_}~(i+p4uWl6*^&peX3U&nHnNL+Q!Z>+rR8Pawz zeIb2ws^FheYD zFL>$4-4E4X##Zx{903eg0QhVPMrjwA`_vbZ*^L{C%LY8kY>yfJ<^e0uQo}RS*r2da zH{8qYN~-c&;x%xnYHCHwUO!c%oYfY(qCMg9hu1B2`$&(TwxB za?hpeuVs0H3dc3hVy^a<9=sN>gN}mH;tSZ1TiOBPdvMP%GNZ!8x)}s9?OC&!ZRAF? z2jEc6=Q58K3@jXQR>-5~or=b=Qn~IP66G!DL%Qp&VAyAMD66I45R#+G8g}`5oLwcy zb-CEO7fn!?^^A|*emwbQ$Nhkiy&Af3l^SVFsdx_{E$-3@nfD95jfE?U?LnTZ{PBDpRMu))1aoKo}#mBO-%)3nqaqT2@ zK9(%GK}(n6Z_u3%hF+o(mTaiXL8RAWo;mUqhea86!>CSROr>ndwkN2VG>^;04K{4w z7$~&~)L+sQf96LA>| zgbrrTXsdFng{VMdR2C5#stOuEW^Q6udKP}`9c=*2F-c;t8E?j+Wd`Igr!mhPf z#C6Kado160R^EGDNr+2&AtxC=x9TX-aIrpZBVQF48E_F#Kyn8Zx<(*FK_e4A6j zy<&H}W!3Xsl!RK#nI*g;gN>LZs{4Wq{KwMX#$~id7EdEX~8dy&j;earaqbf90rl7|KI16IfVT9@Db8k;(0gh7` z@1kD{tw{w36Xf2(qSpH#%!wA^TNax;I~z&%hf%Y7Ed3%5WNqXgOqOhe{U%|wmb3R*}Errmd|tr(A=8=G!o~43be@#x16cOYh{!5 z8`v43rbmJ>_(?deGwZkdhNWl&E}j@)UCD$+FbNP6kn~ds+Q_QP%>bsb1tGC?evh@+UbKDX^iwc&aPA{k?j!u~wGv8naKJVL6J4C1 z;Iye~12_4s!GrVdzPlPtA5j-Eu3yrg-Fg=)0Xk5|mD6FHE%*aW)44eK(bgaw*a6I# zpdAD>5U~Q30a*?&zgrK;(d_A%zG~0bZoTk zY|4R;qW4PdX`UVEd^QWsMN5}vDP7Yt2qO*T_$M3z+SX9~Y;K&p5Oq%1_fPe_NIbyg;E<46)O^#Ecy~ zx5(Yiv<=1P;+tELvYijLk52M_BN2+ zMpy>~r0z<*G9G>nDTFBP#|8T@JPu$$q&~Z4)iay0&Be{@0`zkot8AK z!ld3-iC7J58$;8i;WQW{s)p{eW$HN{ism62h3Z$VIuO~P4-_wbjM1#oT-8H0q;Sy@ z*;cLPy%y(zH!(a;l zDMT#L;QxauktI~iao#na!5tCWzrI9DB9VU zG%u;vr)kDH(J8?03=jy)fYe!6wi2uJv*XRcjtO1Wb*9+3IG?e;)mI!qJkxz-d=g}Y z+rV-O@D1#~ZUNt5M?gS~2fIFpVdvzW0pwz4+Bsp$?i&^*s&(gehko_p^z!r!!Y<2X za_9*?drLI%yo!w(gcX_*zks{#>xv z-n+aS%hMz$H$%H+YFAG?+Bd_5x5hlo)5Gq3ReLGi1tny)7SF9ff-=%${sSd~_1KO8;z%M4x!} zm&)2&Me|Q~-60bPb^~R$7>4j&E$}n>$tHile;;H=}tUEX$!QhD|ls%<(Ml$z`>I_<4Y7CSlifu8*r_Fbpf2p4wq8kN>-}R zhbK)B8`O6usp?{Ns7Wy&2XtIz1#WA%2>&+9lMYdxkNzMvEOm$+Cb7ijhc>spcvbdh zM2eF+30`~XdA%GJTJgQfGN(5*g`Ms;3g_+?;K7{pq4X}8@nHqd!s82!i!?NBtKZMx z|Jz~(5DCNhtpez2Wz0oPSd*T+8~%C`&%ACFs#VC7oj0UN7TP!vMcR~ZJu&_5lQC;Q zkAs-K3vg?Ne`VDAx@yq)+c$qtW$El(1*g%p{L>Wq-5of5gW8@zd5*t^&z7uplQ!Au z8E;yNu&uo=A4ksX@;>UgVJv1ngZxCP-|__=9o`LQKgrV*hh;WcXpSI@Vb}umU4UrC#qrJ#yH~ z3Q&B_NnL{^Jzf6DI)%)^HX;@umacHkA;vXgeJNh2`6y;`y6b)SMws{$1@3N>hc}Bg1=5r zp@FrpcB>sr3(bn9)4Qc4QNm1!azFD}!EE-hWY8?WRHu{9Itu+MMsskIYK*MaEIPu_ zIn_`rwbqhc*XyE?IF(qNeoSO97yllqcqch#zEbAq*wdn>6HvR|mIdL21loU;E zzA+8GVoC>b>|Jw!b9Ncs=rv;%zVK^?KwJYF&{cFbJw)9r)ORyTEJRCJ-ncROs81aX z9SWo>{wL{aB~dv!k(-2(q;;57tAUMyj7Xr=)QAkeXCZ}D1Y$9BIxLRAb z%~5`?di`zpi%-&P-NG|3Ln!dni@m(&N#GQ)5o4kb_LG(SWyZ=s`ycthh23M>vdv}w 
z@~UuFeW3jm#a|~A;3Vm}?n3Mc!(A*mwA%E#XSeksjMudR+Kl9WAZyQKBX(!-^LX{o zm!kbVZ>+a&3o_R`uNW%Ym_CK-o_JNfVdAMtGh9(FV)_kG0I)hmByvldiy7^-jH75{ z_k&(kzxxlHiPwPJlKwEU1eXK&(%e2)Z4VVCfN8+Vt0P0b?*EVCJD!|c8oV;V`x|fX z$W{;B6S-(KZw-DiOM5Z-M^Om1^sU4;J`G009cl1eBafT~Kw8y!g)2>^*BXNoxB0)H zR|4Ix7ww`9Q-F3cY2B_xA?)B)6jcojm`i7L-Oir*M|O1I=l-#LBL@lax_!$?yG+v| G>VE(_4Jm>E diff --git a/g3doc/images/mono_1_of_4.png b/g3doc/images/mono_1_of_4.png deleted file mode 100644 index e111a643d54177e5da37f5ee78c62a66f39e5b2f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 21425 zcmdSBWn5KV*FL%t1q{jnX+!~$l7>yHgmkxnf^J?8hKcyluW6JamG9;# z(aDWjo_lipZ2l&-z3U^tGxEoZ?Lp(Nm0=qx8zybsEyH)Wx|wAf9?%pAllZOdS_iPi z7`?kXAL3FiUK07m;`Ih~^(s?+!2b5Gi;OIxH-V9)WMYu3-eRO>y}962!CJ1Rz*LV| z2u$q%AAe0(;$>8IbaWIHo+>h^IXgRRX~i>i5C?o1hWY#A_MGc#%X8nqujlIeqBgb7 z!|#{pjAqmJA>R1$1}{5%TK?+l>K~=c2!vWUnughGy`HKnA)50mw6S_?M@Ppo3l8Fd zJ?1P~bOfTw%~M8Nnv8_x)_zP(3~RL1Wuu`kyr?dNbGPH;<00PivNGxMytb~hiI0gq z@@NRe46l$R^U=n~>OXUH)Q;#F7>kFKs;PZ<14O;PKi*coY=l5~&JiRP9U%BIFR{aH z(P9wNo)`$k^WTVxJ;H&R_@6;lll9O1oBIa_Y&ItO3r1kZgQKJBYHD|F&!eNG^YZf0 z(a}>=QyUu_Pft&~y1I1rdwFA|GV=5DW4SCp`Y3L2Z;RLMyid!_G-W5o_iff{@|vu) zp}l|qehQk#F9!`x^*j~3uEXtx)9O2dx%ADJJbI{^a|rd=iv@3=q}urYe5AYB8T>FK zBf~(ObAeXj%fZ^8KYu!TWu&Co+1Weo#cH+Be}DIi40O9kUX+?To8G8?Q1PRuhp>qE zuXb{nJK?RQQ``kh>`iICKzzQYe;yEF?nzEcswB27x1HOw5KDlmgY+zI`|9TAc4jX8 zY0tTKV14}8FTU+Sm0V@Mj~*nOy;KQ-pFUllEUK<{2_$4?Te+14^Tew~Z@- zAX%TColRu{v&yQ!xnw_YQC(Ga506z(Vuy_P=;hj^uhph-WDd54cje(O^nY$| zUw$6=uFoiL?R{2LOF%%tF7p8ezX=-h@0~tOVg!P6`cf%YDw~V+Wtwi|)ZaumKJY9L z=&E{oc`aLFVqjeI7#bbT&Zi(Fi;s40CTn)K^YSNT?%sPU%v7Pqux&2%&!!^z(45W8 z82I_!=E)Ql6i!ZKGt*ytECh5!GS}}s+1}nheZ}?nTK)DyPwqe%J11w|PT-m0E-dM& zb{pwL@v|Gf<~G;jygnna5eOmA1af5&WZYVvc7elu%`m+4nM@}eug1Y)HAj8vb-Ugu zdUE6o#>Z?zJ>~{pc&J6f>b;KJYfv)kG5uT8r$g>AcM!}5WqgPS7M>O+CR><`u_fC< zt1Km4B|9@3&o(b(U}B!)vPcV~+y00%2z0-&NlGDRJ|S6>y8BtidR6He86TgM?P%j; z_iooRqrp;nB5K*ukkr)rMn-Mt^qV#=F4aFPn|0-L>&eK-E*{2^i2|cUB&qY<_DJYrBe8n>uJ>A0j8nW@IDXWmHF(_3*~f(%7tz5ev@kb!xX-igT+Q>^ z&=9jL=AYpz5R6woZSsu^KYA1|!=*Xg+0)~(q;m}Qlcg-y<+?U)|mX~R0 zY0+;PN=SV4z=(i_uHRug!=7Og>eP0W$*}W4&2i2?(VQ5zMm!u$@v_u z4!86x#`(p#FgA1HRG*!l_4UcWiFDat_gh@8pD8RWQ+Df(_B&5!ejq$UeXw8X`S-Lx z^ABx;f`h|;Tbu8ZH6J;yA%v{|Pk%zU!E4;Ua|agg*RNltrR>Dua}jc1G|F;nYEE(s z>WtgKX9O+Qss9GLHlHRgxg9$XF(O&wX` zC+{gU9T8@AFP96V5F~gtwq%>HQ=g!61%dd+s-&Ueywp!!FmlZB%VMHxsK3A1eocdz z2yX9q;^b7>_O*w~%*<@{SEbGEG?XtXU?-L~H(xtF-cJ+>_U)T;KL+z*TEhk7cC`M} z7W`0b!FYu1Y#P_2t-XCl2*#lt&eN(cuc+8pzj2q0jFFjn!58(ZJ-nk>5IMg;^_1(j zw6!&>5BPLMkw|La$vjuRdS{z$u+8zF`G-LGSm=g&z56M=MM867QBl!-u|O--X5!si zI$WQq?wR?%Ioryk4WFn<j~H zz@mB!023wE`cyrI^J9K^G--9e5&ONpI;;`*hfg6!Q|8>jXsl6K%1m@e?&GA^B=YR+ z?OD5`=47zY9UrUu=ybD{qVZ;HV_9P50?mtRdq$tT@W5B8eMX{UVvXF-Un~xI`j`sZ z951E@aks-)Sg|uRGgs33Nw_SeNNUvW<~yUQM|()~wW?d>QA_u;({wA$*sE7rS=pGK z*iP`ul`F7<12@+Van#k-Nl|{*yS32=moQYiR(oTj+QCg=ii*wR>)y5kXKVmEICamz zYA14UiV7+#qc8sZSPe{~6qP*cYc5fSmjOCrKXImBssb@ga0w4;^)5>ivt z;3yNhbx$ZLFwpv+t99HD=SrGB$HjF{!vw$So`Thhe@5_&-46>3v!a1}C?9|U$Wx^) zzy@>;Ujig?eh_rt77br{eh!O>pn2+tlSoKFV9koU>1$Uk7wuEeGdQR^ zM_b@5&v$jscN5PK6Qhmc$RX~DOG-+5BTnZcb?SEJ9PI2)kL%Bm!QGy(Z=3~&!kZxO zZ63O^+;#Fe*%S+Ey_H`}#%pgjf4!%(^CSpnu)usWUVlnrh(g6rt&jKC3je@*ZG2$i z2k^4_tL}Kcx^T6yrp7gp01qu@0IotLAItMyzv zl>;{?XSvDHql-A)keZbAl4WzyqT<+L7cA3@3RnR-fz)$PV;FD0cbUza2^|`|;yP ztX^0%->|s2IB+&sFo%6`DKIi#h5lgQ-xECf`|NiViGrGGPlKwC+nt%~{|x;$CN`EZ zRK5P}XysdNS$TOk|GvbZ{_Rw;pi8Y7_fSu#q+%!TqB8u0Q)9G|{;bcbcJf=L>R?wx zY{He;E`p<6nu{q;!^xzQtX~-^z`fD>7QO%UWV;8J2qvi3;({|cGQw35DRr^JDWrn# zt}KVU5N`*NT>f(I^PNPS8>N^x<&TecSEi+V95yDgrT$*5jIdYhth&@k)T#Co@CE}o 
zIe6nZunp&JAvWPE>TR)tY!Y?XK#A|{+wIDWoyRjL zWO|_{l~QZBGFZ-MxZcy0$Sm$x>4xy{mPvf5rwu6NQcjs@waWtH#38PI1bpJEj5UgPR$(Cfn z2ua_f>+bSUadB~5?X`;&lJba=aV<4SV0mqA_t15kk;h}Rnb4o;cl5>Mq9C*gPwqPM zK}6P3qg`qZqR{zj_5C8`3Yf^oUR;zO84;lZf0~-~TRUJyyQlk8IR35fK-ggSeRAKmUGSL_^Zi zEp<8AR8agh{K6WmHKbe?xU3PW_Gn@YQ`x8d&c+XCdfuDcDKL^Jg3Pc|z1nVDVEo?5 zbj()JuWFKb5$V9L#2+NGqg-(+1QT?}5urFmXT4$;s^$TGW*YlP&fO?i^+4Cty_&Kh zXPC}osLPN@$qfgmhO2ww0K%?O;)U&o|a}M z0hhdV!dOg8OCw7cUGZ5D$XvZ1*!{L6|CUVkcqI*gKncn*0wDI^d0|G!MqyWP$H}*( zu60e&?&q)Wp=lqhd7RqY@f zp}%WjU@-stcM?|hty{MUnYE>)rKP_iw5q0*torf05RBycohH*}aF4GYdC}XW$4EIWTD-xF`x-;cgGHelu?IY#*i5V* z(cBdrjNf=&yFm5VZNUdho|A>Io@G}*ih%{j8RRV~DdAy+z1CIL(@VuF)6&v9yxuAxqM%5O ziu#L-85|i&%elC;B%Xy}Bz{nkjXXlD`kj{ghqB}*ENGLf=iW%{u5;~Ip2v@WPYG!a zOiaYfp;e_6v!;~bc9$>8jMA?T#YMm>7Im!IW7$s1*~z-Gd2 zr%~JLc6fLQN#Df81eMgW>PN)d_8~C|N#lXg=FOzsoE*-$axs|HnUugwp0=x3QPy+4 zmnL*6K8QYrBB{dlQN2!0JTyQsrj#kSzS>-LwfY3uwKl_54a;R*J}>@koO1)J7t7igGu_w=ax zZ2<Gc2a8|9uhPH`GPSyO$;kK@{q@=&I zVxFi^E*6;>+R=o=)P&}pA#Ku)5v7{_Ntd|Li zj^ajho@)=cAFZ1n$XQHVsL(Vk9taKzY2bn7!ox(XN=p;21p}tD5ODnat^SF&c7oc# zz(A-Gq>b|4XGzO)wwHD+BK611%`cXqD8I5&-Fqw}gJ##@*)y-NqobosQ%U*xGGFj{ zc+t>p6%-Ui^hIuCc)K4fl}VQ?x}#1AIx=3yd#tF4C3i?eL1AXsxmnuld(s&v8=GQn zQ?E{n3|KDYov;j2MV)@u9d{(+DAlcFhp>e4qMHDBWR)s4iT|Ph>P3N5nH6#t<23e zH8E*7;{>(~Lh7SO^b&b#9{S9Hx zPAuX{PC{~BbxacjmG%qlRBzOomW=PK>-jrxv{36>fV#`D?_^>XZ*Bphn0_PSM z{N;{!mK5z)4f9lZ%_qO_?b#Or>&J8xO`~b6{ss@32RLUs-5s#ic&3rbM>chO=Nb3) zc>?Av>)Hm?6E!9UOePnPl9Fn^rkT|c`PPkf~r<@PNws;6sOh4x|$SBwXv0wd1c@A^oX*kv}%3kvX6)FMn?s z@@mD>9MRP2*D$0S&fxci(_7fxH+r3%oCb!yS%e&HAEF_xDjj4^EF(XO`h7z=0<|=i zq*H!&1B^vDszsuIR-a#9oSt3+F7{K9Bwz-yppjw}PZ~j6hP$)4n1!79U5a2-aR$J7 z)6NmS)VGc5VNXOddPhGQS%~Acu=Jwm;^N{309_*(HE(QejC!=akT)+wuId&ksjjiP z6_#18kXY6{@gM=C6}Gf2Xe*<7$i>MSBSpg@K$cAvrnOihsCMo?BRjg*e=T z#9{riZ2zrW3Ob2{y_EWfHoqPVQgLmJ>3xn5d}cA85JdL>0xud{x4vss=eE=uCI){8 z_W#=#1w}M8}*pYFqR5HQFiCw zi`$7?18?D&`|~eGdGQ#o9^Jw*4Vd+u`)yiKD~f&z;Tf4*P(VRUeC<=WKw?2&o^;d7 zV*!c9Ss{`e3YwZ(`b%f-*+ugGCOn@Gm;rV=tc{u%yfGJ?#?t?~e$HyUuRdN*llfw0 ztj@(yLE&Zq?1r{Xlm6QE=-^6wOSD_rX_=B16@vr!0|Kt#4&1-5V3^5@B-c0oqFq~+ zGFG*(x}|Bjc0Iv?f8mqcx%@)%vV=h(9BE0tU^e{aT?NxzcJ>F-((jcbEKEXdksAE? 
zaO9MD|E$rH{x`z;0D0uO2=KD@1k=lH`1#p2r~1Z;%#P~Lwo1#(A)N;8{eHR=Rd!Sx z^NC@NAqhk2e-(Z5S}Kv|*j?8Vh_Iq%NQC%UST6d$mJFLPDZUaq>i+d!VGs?7T^S305W4_%#A?li&Q)r%!yiG!f{fIwm;} zI{KUa?}ew=YG+q#7OSNdYCrI!jtC1=QjxbXvi0Ww7ehV5?e7N6SXxzu#5bX(q4{Ou z0HugD3Z@h^q?~rbU_TW$&Lic8Bz@ze=$}!=JFIf0k{(HuM9Di}aJu9VlQk=L3E4RS zO6J1ed22v&9Pi_=;khR%t=!%@F(L7Tr>VG_V6d9sTI&-#hvh6(urFHp{346Kx(?tTHi9PAq;ha#o29(aSf_ zsAcCyAg)RX8{(S)ZU6$|I6dv5m$j`O9lGshM#*DWiFyZE7vQWcTFmX$(FWY zskqG@NbMmVk0C>N3O{)I%?lH^M%&Uv$(ga~xhGEA0Nt#$w^{@_u>wZQ@Cwfx-R{7d z{e9u|lP^pe7^;)&=Ip%ik`MX$C)VsyapCSnZ?Bx-b*-rR`+naMYmp%-Qm=lFW=k2~ zEoI>6%=q+l%^qI*j z)0?dqaP*fJ>GB_2@+|pUUjV+6ny?m3$X*tIvlC~h2`?{VWi#*#Flp5>42?)eN0XjE zXBp4kikW+ex%e&=Cp0zWpQ{glbgMKJp_~^c_Q3wS_}W@X-$NFJhNE&d8HvnzDru5k z=Bjd;3^4`5)4R8?j|GV|VU2#EZxLLRfOl79@xD-x_-oz1=TsE_VWhh1Rmo3&OsVlX zn0Lh*n#<$#QWcNmR2xA8a`WBD%AF_WU|_np1KZj z)8OKrOM`>M1|qeW;$1Y#``?4d7uL9hVO0O=hQ4?8dae>%~GvIdgB(+Cmh zjTH>-vLFcnZWWmSmyaJRzQe_)+b#sV+ux@zsWK{KAOC_gmb)bowD*uyQy|>me2!fJ z42PbYx>>iVw7lHj&d$!k!Jp!b3WVK9XvmcQf$+m`F3w?<-=EP*CJHh~cZH>A(r5Fj z;qAUja&VpbrEHOc+I~3*Za~|!6_DMg2hxbfRkZ>?X(4HZUVkZ;%PTUSmNxw@T6VU$ zb6*gd!uLRP7Z0-CXe^Jpe)En@)cwK$ZB1SzL`5xbY^b*8-4Z;wU-nhpGcaRNNjxIm zpOE@L-b!Mp*j7U$ri>{E?Wn7S6VkqLdR6PHONh@_Tie@1VZQ*=1N(oF3t==OA|gAR zZd0@6t#%dq11G-Zh;&TM^auhvW@dbn_Ic|+2@$DISEipBaxBooGPFTiOKyB>N^l$_ zK0aO-eUll@**HBzLM3-_g3+lKELxYOD7~5Lef2Gu)iJFq%LTthJFm(8BAXQf^h|0P zA?w1;s7Dx%1Acn^sg@h7_f>FYrcw+hW_XUxLw(~QvFz8(mO#trxc(%IKg61{DB>}h zpes1R&ZI)!Us=f$hAQYtcm_Vn&X$O-tkzt*N><9SwoP}M)10iGufH_IIuIxuCy&+Z z;;}ZSUKKfji}1XhK4g-ak>Tv(VmL0eHv3)5yGS-uv1RV=T`cX0gct>+GIRaKvgHlm zS)$q*XXLCLj&G2A2fno!@N2Ls2?>eeS-WnLl$4ZrE;`hlkiw;GvEj))Pw6Cs0_alZ zswetAYTvPI6YbUMoaAEdd83f2$Dgrc$sS`2gwPD@-5Ual8;5-0KJb!e-^pZl>i(Q6 z=rlq(9QL#u!g)apR#|;U4COf~8PD|>Cjw*?=aSRX9>_4l35xV>=vfFtkU1zQJO~wE zURBCqyHUiTrAn*+x@n4DtHdB+)!qVm=j`6vKpz`JZ-IxIF+TD&Z&TeH6VP9`>A! 
ztpBgo-v_5V*@NRGT>49^t7g?5(H^XPc8liH-6oNG6Ucr*mX6{n{S3t|Ql5c|qoWGX zyaq=I71WO=`Lk=aSc${2$V*sRS#$J{1B2Go4GB5)D>n7S-q$*J#c-hV@!thdWgZ_N z_wn&T=S2p6`ebWs``Wu~^0{ZA53{_4y2c;9;1DmGrOh`>Tl(2`+U=d6nn#MAcbEH5 zGagl(Od4LN6Cre8F072-h+IuY0o?=U9p}@ZKYyByTo-UV8Z=_RnRF}Ex0#C6-lNHU?g1=;$kvU8N^RT`mIx?sQNk*q!YxtN9|lw(@D*58|Cd)*HoFd&fk z3=@{tU*o7bl*a|$n4L*YT8-t;8wW?($ad%koP9OR$tDNQ}xfik_e(3B)#SZF2MI8%y#uudWhjh>3>LX-U!e znQnujoD``s*r+DKbACcr7l;VJTiR_8`~B=$`_Aus5$Sfw7+A)ps{x|hzrUjwjKm%m zziES<4@fy$car>9HCVk@srgTP{!gR@nMS$auU~zA_pI}t|KMlKQpLl;#+KGhjEofX zY^nM4-^hxRgF`+75=WNow6s5mCw?yYNkxydvqKmp37J1AMeI4JuUq`km0t&hhj(jh zYs+~)=o)(im7tD^aWDqT>GT$t{**GS=~aZNKq-Kqw5jl~U!mjiQHvYiD^Yeym9Zl% z9StG$2eSJ4`T5}BV4i&9F9ij(LV8!1Y}>z*qN4-A^Z5|=Hfz@zwN%2QQ1L1WE6K3w}(He6`oNbk!_7`N{Ii zb?pd(j(FagdW60)oR)KhIcF?7x-H0HJkUdbJQxO^2UbOgo%oG_R;xWVGNP)cwiq<%u>MQlr(y*N zGwi8f7BDZG|HdFi$!}4qp)WAmx-UdOur_E}7O;+K^*-ijP5BgW$VxZ62uV9$Qt;vp z4E^=+7No7g*j08D>107PlwQ{~V_0`b*!RWT%U?ccg-4`^qjW zO@dmZ&k2?&f@@QHz$Cn?n!13?QC-9AetD@{+F$J3kgKKK0dD~n3ao+}zVv=R?kuJF zXAF`H3j;8nCO-dWs;B0Pinq{+eLoK>rFQ~G99vL4Qtn_yO+`0cG?j>)oScYAJfOW5 z>K9jK{yQMM@S#qRnh9j?)aT02o;5Z#HPyQ8gUS40U}a;w+ADPTzcPn^qj0npF1*ZP)Kn}`Ui~c z0_MSDTbJtB?2qtaFubI<-Wd_;A_8>|%#I=C6p>6i5z8y-j{`E`Kx6d^@!cv?^VY); z7Lmgt&Jg)cP^9T?+1`I~pD-Im%ab2YESJK80W_!nGaDXR!%xyXfXF++9 zS_r)Zym>!weOpO&JZ50kmTdc|p_%0gmp;$Iame|c$7!?H##r{b)U`3qDsVA$D z%l0kKb%>sH^oNG_fAbt16h__eD9r6n{tk$qOYC^eAt52a&lHi`8)>d4XD)mb*%wqM zUzymQ+Auj!^f-%^Yww?!2x+DgncGl9Z3de6IQi^x2JZRTgj2A;M#S0FdC%_o83p~j z{{ZJ8FNlm$u;0&Jm<7+}0#3V1rp0>(+WQmT_Rq@D)~3$g6d>e`gszk;}a9PTj7|Snrg@!%}fv97)OQNKA}LnFhG# z@v*Tc++~RX(NMzS`sSNSZ`;(1bl*?hTX16%1MLO%%9l0Ij-yGJjcnYQkdzzkeM9Lx z-Z&Z&dxvR|GN@-TU?Y4z{>em(HU=PBTKdqnF6=S(k%T7()ob9C zfk$@H2z&~u?H;mYvew1Wh1Ep;=)}qR^#dST%%pBw@>o=aC3L^KjA%lSm!YNFYHIRU zcl{+EqLLfWS+WmG!Dix$1FQQ&=qzR(!;CFDV=18t&0F*8u@m>|kCt7$2n zkTpy}y*YVA$HR@K=S6C8)4szd1{)tK!03NL@1H1iL_{F4qRI9^-R(9PDhymIRpUV- z9}P!I(SR+N?8OND+{z|BMbu6xROxp_Sk+!DV_>A(I&l2^UlEjYY%rKhSS~*|S8=-( ztCtGdQT!!if!AT(d!-0G(F4*fh9GN9BfHU6_AVBJ)^~nV3?W5rtyI*Q(*0ECx%$Sz z^=Js+o5A5wKRt9M04SV01eT{}VE2UFPQ&Gw(W*u@l$Gz4MH)YTsCsL% z@)n=Ctkgr*hn(yeBWR0?e$X!9A`W=<1qqw@9Ehs`cW0H%2c;T$lAAK=pR7hMFtUVt zRf!_e6K1LHCuj&fEpUZ5O-xKnB2q5ZZra2)Lo6D}k3gX>Cp9>{$H(F7T2zV@kne#` z_P)RsJEYb(!;oYE>pYR@87Dq=%TC#E*pP2VPzs!Q=OjOvvxVoE%8)#3qLfjx{AL zy$e`Egy)q$lQNTyg^Qw(qGD)0?arS+ukW4RkdyonH-0Hpvn2j1?+%3gYLqJO1uqLr@);ef^5JbzWME|_M-~rqV!juVH(|Y> zKcr)(=U(!4_ne%5?DK|n@%eM<@Ym{9j`CZFz!QmyrROuD=JQgDCKQQfJFkE#U-dLj{44~*1 z9}$E~MTTFW>;I)q?R#jEAn1_*2jB}R>P)pt(QUhls#k5ls1;GI$4CNjb&%Ay1nd7P zYGz#YoY}zi3PLDkY-~(8i;<4*uwy|Y2U$|#0SBFsyArUjuQX@m{e4@d>L-M!W99Jf zj+PbwK@?sQ>CFbH12of^L|u4k&Az(1Ykl3~95NZqy+tV87a2P&)y31MjA0)ol^|V+ zx)eiVa>AkkiTpz*qP*PI?7~91HS~Bu&H~qmu3tNyRRA$*j%g9F`7DQBhVSy9qw{ zC76JAovXTZ4;B5wX}U+^82{M9_T2k|RtlI>T&e1SxsR6o!>5ECJbM(Zv>Mwv7r0Th z5%446NJ^91gp!+xh%&(0#=B_{$v%Aeuv0j!0B2ZuZBpljHA8XZ8#6xrNYfVuwR;*3 zoYp9ePj`zb@Y7`osR1awf|@hvD;~Yr1JXfD`e0byMLAw_f1M^qBYj9BAOP|cARGe+ zhiQmz0A>Ps(-gw7WNe-1GC;?3Q!BA+H-F8=zORi9@Dnw)f|w6GA1R->pxdL}JqbHI z1yKw}OvIap+09=!zhQ+O8Xq9woaNE@}5~*Z4|(iAdj= zxSo*K+M3=GnSSI<#}*oa?)g)%Ok?~Vex;L<(etj}3mG!y`WKg3JIw%_g{xx2Gc9vJ zt^23BUz8Zrqmwaz{0~{s`GMK$@S4&k!kP~Gb;4t$W zIfjWdN0m?A!<|CiJKDou6PsKGu&QsonZe6Rz$X>->7im(lvF#nGt}Phbre6Zjh`Wl zLBzkTS0Hr#4a+{4Zc7jbB>n@fB4MNncJd%G;mOU-P20_RX-^phQS?g3s&Y65 z)9c<3UrNf&6S-F{7WT-TMXzH}Wfsb9Ii~#=hugno#EDRggY%g1^62QuRUi?GW2>j9 z_mw;bLDpe=C6>%*LPXl1n3H?q)5K1)Yp+7W)3xjP8MeJW&Z)hHdO_I-LgDHCuX63% zB{1D$%MLt^P35PHHMKFpiyR~K0YhW-nlN|LpM@&xjl?+diipLu zrUq@^lzQ8@P|eNP&Ss^1oi(IqJh~_? 
z@h3wRaT7?_DT7)T2Cz8>Y84jodBfoVO68nY=soU~1!9wM_KvAVfeZ>^g$hEVq5eL}LG`}e3t zP$hwa|6Np)d__EX8`>!XpP__HWPXmYcr!tN7~g9e+jNcu@_#6HrX|}+70OjMU|HQz zHDJG?0Y$A8u<6T-O(p+_rfJohX#?L0-#&J0G8-v{d1@pn9xO$ZUBZ@DymL`7S3(iQ zz1HwN?R8FzUtiCC`H`S265!SyU?C!Y3I~&^;}B)%f9=PwHTO)&GW8Vm^eYt&j0So@+_-$Kz&u_DL&e+)(PKRvIN1(E zH(>`#2A&9}oB+ni2A}~{?7zg&CEHFYmISy9;ro|2Ycq};dvv}6ZRNOt_PbD%;bfSZlSTZ@L;igxt$of%xv{n3xCodzXbgTl%wd$}PCfgWN^&zZ=z8e%fm#eU$Sk zGT%#JYCS^Ax$2@U9nwQs!@!SxWM?OSK~Y^c5)L*#KclAGGCx$B+&(*or7sL4+aY^1 zTK^Ca2ytvT5Yu$l+l~>-~uuA`|th zO7-@R*?Jr}rBr77>l2zJk(8;69_CZ!0Tvli9SZinLC}L^T{7)eTRg7p-c*^{r#|=x zPv;X?La{$Hx^7>5F_cDsPAu{{HcdlLp`%d)eHaJMZ?m&Km?T{;s#LN6!wFwD@=lb= zq_VZT}WlYkJTUPq~KMs?I6R@TyBEZUdE?o6>=6(7q7Lhb6pN>lFIf*d}8?|$b-4k!^NsU|v%x736yaa?8mWd3F6{cwdi4guh_GGN$Nh;r8@2o7j6s!jWIG;Zmv3dS8incA zc%4;oX;%z!S9r`|jeH%kudtDJ9^2wJZ*o8@Q74Mgvt=JFE*;Oiaf${qhO5wXa7vt>{#z|uVg%=FRPzE9o8gUT z_L7%V#lfHbFi75Kdzxw6>!2)J=5;om6U8r55lM_oYA3SeVKnE%r1gFNGEa3A;bF+l z?9^w(G#*|Ao8a~mYJ&>OJR z#Wm%~mjJCfcLQ3eqhmiJ$V@@QW?k4qP+1?fVMTWI+Y5Kbp_4&)3fzc8ixKAOAcyIn zs+i_amH6$&OJNKnRky5sfBZ|=yNIjDTw^aZgT|Mf($XDhKLU}XTR9~ZCAa1}S_8?( zOO0=#A%ltb*BAwE2IJ1x1mURX=GjhubFE!WqDU{|-WybDF~dCF@7Uk4uoX?lmk6x< z_;LTi>S0FTNhQPS@9&A|?5PuI9bKUH25ASt0VF5q9)l~nfGeu=AR=6(hL92sNh8R| zj)@bo#;ZU!i7b8I!&%LL|GpPNiz}F3Zc6`?`ep-%wxUBaX7jb&z_!T5A$^eVrViIW z@QC=t5go0dMb-secV%TIs>{jIW?QPlV!|33AM8RxE!1lEfpdM_rI5z#G`Zqy$CsgD zkk$83Phi(Wb0>=G~ zDNr&6H2oC4yr>qTBlpsVhd_PLr6k>1bqLMBx!9nKIOeuL{u0g+uPFmXSc?Uwn`wZe z!z1T`_XKgaQDiFJNJPg20*h(XJSqhoI0<;A;^;o-{0d8|W9%&Vci^%-Br z48}_gaZeNQ34*~fAI?gc+Szp`WEq?4lu=~+&C5NQ@&7n$)n!d~i&!WHuc!=)-!aMy zJV?7g^Yc*D6a)4ZCE9anYvz!k9h6)47-bxyLdMd!11mqli8txcfR5R9EO5hyOe4)O zmUMt@XWQQ)oYY3d3E6@!re(RX#Jz%O%Kh@?3!oQ1eXMC83%iOqsco=Qjau`heNSKW9~+tQcj5kW84HU)Y7W3nCGDs!hU z>o9cN@8V*{W$#0=h2o&a>kM2s^tC~g$?`7Hyq=4Syo(zJS}!qA-|0lCj?ZRz)vXnp zHX6Hxp_dx{d{ogHjX{}U#2y2}NJAD&GqcW@kAdnHo=W6hjO^yKg5H{ztuHJSI7d?rRJI3r^xne|Pt4ss7AAF=L1sZ>OJeMbU|Hj;KkTZRA7Fa)XlsfK`aM<(>b0$= z=GY9oU+e34-Pj@UpOf`C8o0#t@P^yFJJ5G=Is9xacliFgQN{wu=t()&!AMbKlHg=vR^}tKJ!4 z;thb#`lc=?$lKX+o81M^4)hjT_M7}}B`>&ae%)^Gy%&huIZwG&y5pDs2v;0B)eCDs z!>!eEGKP!acVN;9da6|AAGBK%MnZ2JyJ({w?X8U`f2shU+f%NlYLBCmNn#tnZbH{) z@av3jhz-JT1$wZOF$bVCsHZ4bIalTV*Tf*fGe>P%R$5xay3=PL{r#a|2#<-{2WR!5 ziRe9|c0Wq5`V}oU4yFCv{CqckI0kmzy}*PwJBxiz{5^114>1<66EcB?QoiW#?>F<& z3TAp*KcNdnn6N~&rqaXixUJ)FdH6C!qoRJ$f10zLv`-`S# z7pnWmP=zOBxBlrbMhCzGH~&;tQY!USI+_g6g;L7crQv3l-7(H*fXV z5r9S?+A1|VS3f`sZ8O*b?@)*swXD^*;hjAEMc%#Rs!(1{N<>7&&g`k?0D$p|p(1{y z$lfWS8`yEysV6ZyF}BdPw%ICpCci4@JrAu>$F6mT({k$S;;?Uz4HqwTE$7CLtUKki zdnVQ9AoXNt@Z`qJ|4iHwNFgW#R~2@&wPiun5-2x}*BT6)*f=k)O3ASX6$?9^C->)w zh&={0*TcC;=ppj;h6oNN9D1$2eHo@S$(>G> zVG+o&EI|=#KP7vL~o`$$&@2uDMCZEb@xnSLCAc^70}y zUgCq{Ai)+yLcniV{CJ&{Q(pCJ9i^b|P?lGwyHHT)4w>-Y-A_IPH8>@T|4| zq@&)K;%q7LEW4;E_=vx-DUgge0&D3I+DD<0RESR{NyO+3`Y*>LA_zi%ge4@%#3CdM zYHIlKFYSQLn|m1H`SqDDhORvPQC(h~oYW2k!78HR<>}F`qyp~Q`upZiDgkCNqFI2o z2ueOsDx?s^dq97jajN_B?h&;9i5_}Zq18FYbzoFOFSX16(F^S~k^I9PrQq+i$d zEtiGPfYJmb%W{UM$-#abW#u7|mLHAhsun;qxxn9w;KhzN(Ej~Z012`q@*qPf`!of? 
z4|aI|d~oRvwAJ@4+zQ@LCwXmPfKuXGT?(F_o{ktOONC6NDC)8df$+xUsdlG zn0}hVPik##9g-zZ?8!9Hh07fu+$=n@;DmZr!=@Qp6*T*wNW3-*PCKP1hMAV?2(mbF%2D^DcGT} zBj3JxLiZI{RzU$_qK$6)L3rCHR7y`M&VFt(jWlty0a1U#xEWPY*-G1=OM*#~xI`f-p|`?myeq&a5N)NJ}?~&SQmm zcWZ;P!g5O6)xZtfbCHBnFs)0~bhg{5%1x=$g&9`ltLiCiH zbeGKxehp*l`&iGKeeO7BOjJ}gkkjK$d=G)^^HHKt|7Q7$KR^R61;z&_Y z$I$w4Yfj(Bh=4TdbV=RS;PM%o(&>#`6n8-*XW~S|%RBDj7o;}z^Jf-80MrBr>vNf6 z`w(bNFFgq?)pctfk7L%bt;*l>8ekZC5aX<3r(hT**VpUh4oNVdwP#MsyA9m3d^2M% zr!_S+l%5}_j)UnLc>o=(1fdD_(3SnI_RgId9e+3%*V3?{AVPnHJ*S2xtS# z%P0T?o#}h# z>Y#j1B_?ZxNQV67I@T8|dd$5#JaLUbOIHYk&RG8*wW*6Y1*3Hf{C&{1e}MtHKycB%gzua>v|WhDOh1#+Zw z`3|XV|H$R5z2G81B(SW*#!rq5j)DPmF)7umvyY{RBI)?(2!6Ju85ftFFKT!MT6QTV zh>iddCchpASqJiOU~_ZxbPT#a+WkdodT&~5PYW}%+k({-7Q1Z58Xc$;tc3b`BjqW> z!Q{u6veD75kYPGQna0&x-@qV;j0ak}AmB%Q15XP-eL%ooKDK6mt_AWD0$1#rnE{&< z8RT6*#2W7RWg$|5L}n{KsD*n*UF+@F!Dy1JKRG@$q>9rMt~74^d5AeF)BmfS^Z!fg z-s3nhzwOpATlsa%`K_(gs5BKrM2EW6hu)UORKk_5+-xdJrl>&cZfPK1GF*NOliR43 z)O9KlLQN$#or4}!0Q#!(sCi(xA*7!AKdfP;hYEOanAdEKA-pdb3U)<>-2WOP_P8dKIQ;zWwkV<{_A4TZ)1{lEn~Ak zoLR5T%i9t2){e-KkV1=jmCHph|4{Vi(eSW<#B%OWpWj#p?QVJJS8(%wBaa`qr9{;0 z3*oxD;A8QBLA7leaZ6M5U4rONpxXY*;=@KP_HGB4%f2V}%Dxs|CeTUWgt6RG~+ud0x){rRkym#DcOKz&k#l zpHTGoX;97r`_m4lC1@Ks{`3F{ur6n|f$a&1;b3YHA$D<4G-PF+Px1KX;o1qk%p-L<%p+{aLo|GXkLY;nY+& znHyZO!W)a#W(9Y&wUMQH*MNzeY|nP|#J}g^QD4LN1Tr5c8;~0>{sKCUvyc~wu8-IM z<}eN|gGl@hK3-^0K-D2$Tc5~3ZVKtxhn^-8KiE`W?m)CbIXN9XN7#Mn9DxXj9Bi7s zy-~3Lf%hgj+1!jecY^@?FqSwFV{F%dmlIwxx29u$L3O*$~H#b)= zVYAtod-1*tux2L8$087Bfvo{lH0^P7b8~Bq$kBpeJPt>bPvz^OjD{wkmuDKi)w5&I zbP;aF-X1Y6*wiGENc`uWZVJox)&^iOR?FdagxxZO3~y*`RFU+0eat{tR~Jk*X* z9AU4_$1QrH*>|85wSfiD$<$B81u*{clQYOch_g zQ!^@E{h^V%+x7J+(1%MCpe*&)Ys9JXa zJ_d8z@j`bNCt%w(tX(7y$3d$%lU`Y&J?GjQHY!1xnrv%OXKe`tTLz5a43)~;(UDb~ z`j(=-eQPDW0NwI)hzxnFSTmOWedixq?GB7D$=rK13}|$G zj%&V`Y;%+Uw;I(Ikw{crM56^OoUhhZEfpL&m>fwCbKTe+5iMhal9LeuN&e;R>Sk_Q zi*s#z(Ut}9PvH@vaACSN`4m8W9%V9TuOq`l|0rTq{fn-!yhv3+Q#1J^!!_Ca8;1@d z!&`3O=EMrYprWQat2l~&l12IKGpS5gu@bgNpic-&2&4EBLhFBNpqO{N4u5J+0Yt&Y z-oP1fbaYg`D1{hD3)O~Yx|{q$_sk0M? z@k63Nydy}3x327(>FcU6{Byo=ZkhhKQ9uL(zT35s#VY~CZc63Qa>PG7gsz~}loRtS zA4(?x0aw25N4&&1$g(`U(-?ewuTx#-_B{eAQR#dAqi5U^!I)gB{G9JSG&Yt!tyZF6zYyM(bYwqvQfnhqn zDBYayGsW$FR^F*wsz?&O8>q&+SLQc{ZQn%F{`%`6dNr+PmK|GHwQ~GR+1=LGOVz8; zg8{d?-y?soJ=5HfY(|t*PZmN}`2XBkOi3iN!uJAhf}Qw$Jp>10l8EBy&%gN(@eGB7vbJ>bo3UlY|z z$=J7M=HuTmv{Z4g+>4#Rzp0*w0q_3w%2?Cfk4*}%X+ z#Gs&!m6g+8j!tRjt~EOwTT@$rGwI!Mz!@4?4NnUXkBd>0cvZNYV!RCHu9$*C^uUT{ znV#dG5#&`Q+j%bYU%k+F_Uv0E2&6O0FM{9ybZq}r`}W81@84QVk0tt!Ms3hnl!G2_ z#Ky*kg*}>$Iy`ht8mG>I44rEg&S0mwySShmlHJ)wLLMVlKMKN_0%07CX7YZ$0P)?M zZ~S>gkIVs<;iX6@tw>BvjNxIRMsZXPBqJ#aPV$kAm{?X`K7$-7RY6KcB{r)(KW^D6 zBM>nc1ReSc2YSb$_MKhz=SGPoN9yY8hos(o;%sSe@8+y>QUU^z08}vUp~XXsr|kM``e4JuEED*4FmqrlXh1pyeGi78?wTJwB}0=9A*qbD6oY zu&^gF|N50k@2_7R+}v5h{+UF!78ak+8Vd`1SIW4!xcDOO+}zyy*PLSZdtMOZE7%_> zvrJQpP*YQnr>QzC=Z@Y%4My8W$xw0sR#jDfKg@uCdcE`WC#2Qf#>VESVB%S0dHKk_ z?xV|U=-Aluh->z7B$OZ4;u&Y)eY!PSZ9YO2C24QFHU@WA}z z-P^)0(^)=**MIt;L*tuh$UGUOSy@>fuTCt<`vTd9x8?(*)%Y83iMrq;L>LI{i8T%`k zl$r~O{H9X!wBDzezUkN54f~L!S7 za}RLa{8CeYqTN}L?Q5nzrR#;C$qyg6lz^*@op`TdVv?IBCC#v^loa|c~IE`NR(7SOileKRc;vj#W0;9>5ZL)&yVF*f#jxUs%&cGsN5 zY>02bvfc$Y?qgQ~5IT9?H>nQZjGDi66-XA(Y)Q!42F7n%7x5hoa zb7Sn64Ml2n`fajOzi>Rx&(2V%Og7~&nBJ7MB6g6+?Uk$f;3`TEMg5Amm8mGf! 
[... base85-encoded GIT binary patch data for deleted image omitted ...]

diff --git a/g3doc/images/mono_4_of_4.png b/g3doc/images/mono_4_of_4.png
deleted file mode 100644
index 675b636ba7c5216e1ef21670428b5f7375b054ab..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 36209
[... base85-encoded GIT binary patch data omitted ...]
zKD|JhU81fID+5vN2N={ONh1(K4y_&p32${(RZG0<#gemV13F0irKP1ST|R-_=(RYn z=QSMs?i40SRD3ib?&o{w=jWif>~3&efT+djwBC(=tbN?(c?6`$(J%nlLHyR`aJdz5 zGr;c`M5m{dg*Ta|!*7RdyzD2bS#(>VOBhU~um9c<8rGvh0X4fI9Rjcg3mO^__={5@ z79W~$+brX*UVcFY)ob-z86&gE9086l_l5-5tw92CQ8V26Hl8xwR+w_~aRt2O`kx;r zct!#*@L#|F0Sgex9($85xdCJz*P=!Do5Va=!Po7ywiovH_BJ-oy?mIfHYLN$nA&<2 zmUy~lv**CK&{EZKtMdjC*?AOJQr=R4rEZ*E~gH{YOb&JyFDFi=(u;iYIOKFnQf zmA`#w5_|@fC3L{jwzU8B29vF)wup|s-YxG=a*I|*t_l2lf~N1WMzR5dK%9}1$IU1w z7j}^ZrW|f<8_Ll{Uz~sR@QK~p+A6elpm~+im4=J0d1o*=;h*glkSgT8tgOGE`7XGr zr5*?gh3oh|MYwm-@WDG|HE45@B)r11wPNv_0)#hjpEzSpo#e}ie8u0*0i6p#KvBbT zBY71p6OS%JH*HxvEolbWbqB5Tt-6QaYynbU%OqT;Mn;c$Vk#?j{21R`I8!#fmf$p3o{+d z2xQZ?J-y)XJC>)&Eqw66%o_l1(+OP0DR^dF=ea%A2NF2wjM8EsRA`siIu|nd9>CLq z4Mq;FBCrO}=y`im>|B-BFQ4dY18b4->=NWb%U>TO$Txo_Dhd*L|C1VtY{dzCb@liQc5oZJ7iF~A%sNdu!xwFs zlDB%TVcO+0_Pq$9EFkcmkhaedygd><{JtI&6B8kH&l7a^9!w@&PKNuD-I52zUbZ;O zk03DFRcLuCDk-tUOlNy3ggAw7UN)sACKe8FgAYu4;sC6NOAnFb7f#_2(~Hw3)jNrB z88d`?J09W>$ZCtn=R?$e0Rzn)XWg;Yp1G zCMmKR!Wdb`ND6QEGmD*p~?d|CZ==!e)lQ?%L3zgTq)sly{T%|yp@#qD){Ck0( zgABjbr=w|DB~G1>*whjk4kDqj3X0hdCW%1-{sU=RflzRLc}q)+4^oZPvIQF)1Ur+>1>kx#A52ZRja0cJ z_JK=Fka?2lPbVy^+cgT4i+gj+mggb~D8tmdGP zf<>>5SYS*O`C47q)fgC^mk0^mHoCxSHbjxRioQkYOw+730i#=a2**!>Px@+M!9xKY zVi2N4w(Tn$yvLTlvnh2l_dPZYWbpq6(x98(-w>!g)C2dM9u@U>7!7#flA@9lxP43P zGl^~Bv_NLy^e7>e_Z=wA0j5L^fA z$H!1y-Znov3Z{hvH93dKU%x(SYR)Y!d0rl^8uo;P(ULF#Tmx4osCfn8p2y(%Ck}0= z!Og|tO2wBB2k0aL_|T&n;Go6ChWq+fb%nv}{|7%=azO4?k!7t1%*Fv@^c3-j2M6B+ z9~nt0ISb4WLPDPPQq$R$S=r}CB{j9J>Z3flAn-HBgZKo24KD+e znCH`tk9_{_!2#Zm<{+5c(Z)Ei`~rA`i;IiHrDpC?Q*2CZhWGC0jQqslPn6#Z+sxPW znYIN@8r%cUE#Rcn-ZO$ni-7R)7=a0h1qSJ%V_>*#4JLD!*VQ>0nM-Jl6e~KLLVP*BJ#& zOw5o(m<^zo@mKxkb>E`e3k zT>y%4on&j(8er0xOM@xFG55hjeZ=GAN5FYL`Zqt_i{5Lu(4Ukb{>HPdruG5?+7(yFTyra34@OUi38Uc^lxx-0MFFk>-ssV7BH_OD7vhSZ!u({7T6?1_5mSh4)13?5ULtx1|^yR39n&7v8 zJ{Q@~U%coN<>TY)#uznVt2LS5mY$L@8}R^c;M-pHY^G>nr-wrRWEEqv?P4>MHr5AC zxj|TjpPwHkw+4^>R%pUu>kzoCF!WJXEka9gYDQhiu)<2p%2R-TU;B2BR`>>2 z*L@o3ZY!SkfH_6smM_$e1vBpTO0!5^?H}EHrw4XFFEzLjmWR(hG&`c3c{L~^0zYxe zo?-cs0RCzSv=wm(hYT*w%xv2)q|=WglsduLoXDm?JewZTSIq_fmOKHWv~DJ8Xe$uK zOck6}aA2<Xs^#T8VNb(MJK++ibfYkibG(uso7aSnE$og$Oo}^3r?TFW;OXX|j*o zj{F}&Y4;{L!|n&$?ixPlQgU*jVy9&249?{2fAQkrd++-eh5At#jVl`TCVhpBTNoUs z5Hc_|nx~tLAx#Q>^eq7iuo-~7!UEEmV$a{Mm^o`W|M`+Sf}6$qe5dkN5?63XFi&@2 zgR`xpoPF*mMjmSRbQ33;(@ruG0nba+N{f^rX{zmXl5C30#u^WpJK=XvYNWbhto}f& z^X$OP)yPLJkaElnvW&@{JY?}Dz%+b*b`K|GiuC>U%}uK*?Cd&{bCg0@HeL2$Cyfd{ zh!zWRgb9~CmdIuv9TGpnxN3LbDhyXe>Kp`ca;qfr&!x20;da-MnfWYM4 zV}PUMNg;3pS7WR47iCJp8W^ZbNl9T;Dg@n{A(^f2HJ@%0n(ep4HAKPea;@sIsj1w! 
zpr8$Uli+PWm!k~6SHa{_@OOyw9G>*Cx{Lx3SFVQ(_mgRm&hDU5#yT;}2$$!Mzi<@@ z>Yv`hw!i>uIm@C;MMj!?i!+7u##i8e5F{{1DS_PyXT$|(Qyf9nUG3Z!gzAUMPrDvk z18>x{Yk_>^l#?~6_@}Eqv37R8&_KM#RPJxjlX9ot`NQL9@WUeG!s&2XApLt7(9d&3 z7PFyfh+w37{I(u|)3$#G2=)vECmpC8IDga`*9mS%M6KTA&8cr}i3Vc05=M9$6id~E zXaMM_7dT(}&spNxciw^%TCm?~@T9Z)U2qojzXP&s%l!@{IR1pI^eaT3xh3kLCN^4y zv_-&7yC7dCrLYJ&D_)>vM(CUfYXU9C2w2JIGV5q&j!i7meiVMPN)+xOxkErUd&qB& zauAEMm~#d%$%WA%=+fhY(*1_CU{C@eri?m>M>SE=#kPX>TflSL_QjP>kIboK3F`yz zgHrt6yC^{#X~K&HM(`2=asJR;BML)C>8<;tLqdl7-DDjIw ziqlQV?fWhAa^_V>g1zaW5QR$B_c0OzydR{}nO$T%CPgqdM>I;Mj}Dv^FoNZ|S<{t% zxrx0sSVgRiM5Lr)S*3g`YWt}F`!!*-_}+$?JeY$9_zn}Z6Thg=#vH-r5D=5h7yrD7 z5PZ{u1zsVWZx+8XTVGFk%oj(IfByk zM*offHbjPk=c5t@*`oyS>W3nD>P0=?es(%|iC~BZm5rM0g3^M2|DHv>j-%R-&apgy oG%l>n^U04p?*A=66Jrl(@Sh)_O?|nOdi>7P;__l;BKqI|FPAPahX4Qo diff --git a/g3doc/images/pwl_calibration_distance.png b/g3doc/images/pwl_calibration_distance.png deleted file mode 100644 index 4bcc33654f3ba1af8d9191215f3784bb446d36dc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 20211 zcmdSBcR1F4_&qc>aGLhvT?!7oX32ocndY&Z6&VD3cH}5TZ~hl3OZhZ4?R{gF<1wIDZa4 zVQU>)ga6>V%HPsG4_|)g??=JU7u*yL-Egx2=U!cT}+{7C#0>;W=r^)dYnY(o*2MU8!B+ zE4<9hS5sTl9!WyctoVeGm#xC$jusXLxja#@tjc=d&o_aAVQB2d*{;-%vHIm{RhK{A zbv?2wYjr)|J(wyJ0lFyUYebgM0}Bmbv;OoP(eOb*_y70)J9!@OP9n1{~vG?L`zy25%muj62|?+UJh)+1CHKRHIOo(db##sF8T{ zw%Jvq?HlO{2|w6fg{rHf<+D+!^!AEr*7al&ua(C7n*t}hU#hbxw|VTw^;mjoF7xq) zXyg~38u4r?&CShi?CwUY=26yC=XeM)#3rcb-61iQfJZD>8bHds;XA?|BqQE5( zraY(^`ONA@R9u{gW-yxh)$7-|mX?-x_>!!W<@+7<^z?o_ynddHj1GBI>9GABUTTL9 za!H<_-B$9)y%ME!znvm?NIev8AeXb+M1Cnmc`fSkzfFnA$a`P)#_AN{ou9SMTa(;PwUyKG3+j1AIX_s^IZ$0Kbpym6Prqc3sd1G#&9fRUNyU3 zqugxxZ58yJMR~VsWV`){=7@(KuCS-Ad79l}$=H{4R#Q2a!$2NZ=@*VrA#gF zjmP(nnZp2CyI&GM?Jcn*55a!mByKw7n!Y>3jl7|ie36}tvf>lyUc2vAv`<0}dG&ls z>4|^{JcZ7TV*eRB)_;?plE9+i!^}{%YH263uq~y+YkUI2=h^0iBQa0Q^3;*Peail` zX`@aBe!X>$7{l^L0G;{mZAf;bfXmsf#NLbAxoXgY$01*cLiq&_vHhQqW$hnPQBYv= z;V)#}*V)l~!eY3ZC!-_Zl=NiZrUPnG5*;cmFMWOyX^fny&L(M=t z>jlLA+MAFc84vp-+CwkU6qJf9zK?)yEO@$<61{r!S3_5xJ+bkR({#3hCcD5Gt3eC? z^uLDeD1yIKwUWreElr|#?(p;&ST0RdeGs!XVAHfVu$-5ZnwWSQFLB~ceF=Yu!$1XY z>kHhL!si1m5yH;mSm{4*Sb`-I@$VQS(+c4=9PdCwgEi*$Ialo;l-&a-iz&ZXlADL2I54GVFW3}l-dB&G2od$5h7W8A&t z+SPvUg#6jwSq@;jv+LuI$QB z*k!vi>@tci=40vRTuC=)mv#Xf^W(WdrsVyM&dS8_k!JMnbCtSuW!ra-LOgb6vy zA(WM*#1yq?ILJ~nJ;&#vX2zxjpLW!}a2(b*)FNI*#`bJ4x>V&|>|d!2AUAE(>QO=G zJ5y#-WfJ>aM&eotgP_0HL^lom4pU+gwWE!%k&40SjIX5Jo9Dy4xI6Xc39`dv5Zc?a z{Ts!M20v_J8^{;)Y>U3@$97)&;DGUvA0>tFI6J|Lb2LKOf2#de)vfSZ+0S7p zI>4eec9KG?^p1l=UZuSFIG}=*p{fnn(Sj$6AHAIeEsZ&9p8q7b zqpMYs=Y%W+9tZft_03j;v{G=t=E%4=+)K1_7B3;xyP;8leSd+4HOfM-DbOeym;Q%o zEY|pd*TDqtiiD;7x01QN>0}pCrdV^kk8uNknum{;Xa!BaIM`6Uc>MkGPU-NyTp>sK z+i7O3AWd(9h;|J&^p&&#I|$@7uV_{Lzzb}^$eTt{X+`_DG@Dk0+2!#C%>^VH3sADv(@DgWA{&b6t=ionzF zNnYR$?D=V4Shm48yy8waR9-I;B42y{x|FcBLUPVSB?W~$B{3#!ZBw-Y*Bm8IZTF5c=OTZ9l~vJb=Nsh~v_s zOqwa2`)Z6ZnP_h=jKci%U7H|JcR__H#X`?-V=z5&oCuXAC!7-f!Fn01AX;CI6w7y? 
zW|}e-Q?9LTsf#13)4$;PI-dt#8$~j?A)AO zp0?%Q#uUBq{q_el6Ta(G=@Opr8-ya8g0mGOt;VXHoy0S|4i@vLT#hpX7o_{&Uh)9I;BA zM@L8G z`>vY%Z02N?0}EAH&yOx|x#WUqYrm0n-}j1P`{eTUJnc&>9=>H+o(n(N3kGZ<6D&SS z(JxdyJvm+;9Yyl%)>uX%&$Th7tR{Pr+^VY0B$2{_Co-q|J?&jx_MM%J*BxpIhsb%u z-~NotW7M2p%f`AANa@W5A{rYrX}>RAs>W_x`R$vkVX?jdal+uc_wOUn9CDDn=Eke# zX>(n?=-zGuC!`_7VJ>7DdW38Czs)7IEzF;xy91Porj_H@^zSw`yr}%Az4{j zLwV%oX)RX4_4P9Sw&loAG2;%Ax4G?vJ>9sn2rx#)v?6IFNl8f{3lu7}0hOUKI6KcI z=7Nj*k*}F`t}UKLim+HeW^Zq#wq)<{5Jxt2e1xu0BJkrSnUg)v#-=9YQ!DXQ{MLx6 zzO2i4>_1HP`RntiWU_hO>L)JpTA3ZsP^t|MQ?BKW3S}SaMM~3#1Ilx7}gjB1O?lE6&q`5 zX<^ll$MyH?a&d7vxvEQaJfM^mul9K0*}0}Yn{0$Wa~MX`*VrjhZ#1*wj*k5J&Fbiv z2TR)aHfJy?Dd$l`<#u$WOrm#W{>^qI78DgVe+|YF^vwj@%7LvIR@h%K^V?KRxy?Q1 zyZ(92=R(HM)lhepIFhiw#l|$_?f=qVBR1z71ECiyc`sdRSm@1V#5FYkyDh|-=`}-) zn#@)FoWRtXWa{|r#fym9+4~h`9fc39t>trjKV+zzU6znwBn-#y?d=WenbWz9nH_dm zp?YdpgNKEJ07&zn6CqXL@_t^gSvX$wX+4ZUS@<&gEd71iNPdeY&xY<|gn2F=s_vh2 zTsx9IYU$8(bj(N!U8L0}aN6>t@nHTLbLQ=Cgs?x8)R4cAYBgmgL>X3q&*9k(K;kMx zgvyr6Hg~JMKq6axf5>B*L2m*k-Ry<|Me-xROvx${jl~GJ>7Ei&a03YTdqv?#zFO zFv`z@DDh)jLbP&~23Q%Te6G8RGkdHIvG45k6R6~Y23>;qu{tmOzcmP#3h^g87LJRQ1n=IxV_>?* z5Zm6~E_>%r!jC>3o~^;!X%woKbkTXsPmd@UT(XKvNMw634*vAYaGBR!k^u@eTItx9 zF6L?>oa(e%K8T!zWAJbPgrAbVb$Y_ZhdRY2OPUu^}+_Whnsf+fwmh}C$kKDngxMwTyWx*fy0 zvAylu*56Mc1uFNOS!D9I*$wT0D{bv|?pDd*YiC_?9c||S{IU8~bk}R?ZN5e2C>E+S zS+EUlx4M%z6BWlFZHx}db&f+SpMTSw1XHkT(qyOH7pt`QY_hihqC)>Ev zLDX;W1M*m2AFor-6$(^Pk_4eex!74&(O!9;g`-&iGK82V1NLQb+M*dp4VtSrUEQd4 zp&ClFH>@mEIjd~^o=h^@9qcTIhT@Y=XPqALKrqEeiF&Ni1Eg~G;SaX#{CthuG;B>p zLRy9=k63>BnMMKLppfg*X(ogeXFPkVvmo79`h}&Azom;zswp7=H)l%u!Y4vD-blKuT0;#c8SC)+mbyAk4erwHYs$&qTcuvMirgNM zTHsVFv$G4!o+SX{HyV1P{Xl6GGBdwt3~?1u1pdeY50x%3d0Mf-@ej5kPvv(}`0`~U z*ok2Mk+7TiL}pyr_U@z2l)qUzds9(ZP^V|x(B=KN2cwV@w34DCJ4oKZ-2VoI8Now^ zey#V5v|-D>b|_Sde>&$GVv&(H$?o#)`6W^JCF+2aeH&2hrH?Xb=Edks&Zr>HR2sLO?XOoH zn;?l#;L)S4-94C0n*2l24zmOKx{vOCB$N10I#Yvd5W< zUxW_zYDC*N_UAhWe9{Lg&b&=ew-sq6MK0-gQG$!9dpS9eesA`fVPIfDMML8p3c)Bjr`Go;$vH>R3E zTI4=HTXN6z9hzTkU_XNeR=L4^zy!|cX%m1CU+l|ovM!tYk*xrEaEi#|m&mrB5N_HH zRJ}q)0P2+<*iCf`& z=L2V{jbAcX{#mwKxE@e7!)>+(Xjefou6eY{7e6tLbkX1sA4XXFJ32a2B)x?}l20lp zeAE^eJCvnF#NH3LC7lTC8Z&tf@!uJhlE17$8=#JhCqQgUo` z>|9IcEz!@ENrrAyFJH%cTUv8C)QjNu_$T0f9*(n_kccVSwJY5VMng?oIdBVJ5{qkR z139TmUb&_qMrBFng=$_x$Ni|-*tv0A6}Ou^1g$SxJ`1fdTtQjvQsBIi^kh2Je2IP! 
zhioq+sGzKNo!3Bpy;3KXpx7w#6SL=sIYr@iwcZ*|9oW;`b}P5!=l?X#GIzJ@meg@{P&EerGY|- z@dc)>zsaAd_w2TQU9+t+`5NIEb9FVG3NRAd+UwxcuoyjZ#ia0SZQycT{8_UDqdw6#OzLeEWs>Zz!!E4sOT`q4+_9@S1RQ{=N{+m*#Y z78p)_p?V-RV{Sr3a#ptMZIxze7nq>hf<5ot>i2$b0RgP~(vTa@6u+5n^A}J}Y zNQ_F5uS4`#w1l2&Ct^rz7(9(ERi&>xAV2|ykX|eOCTOAKl^P=@X8euFwp^Rd@L)=zQa4_^6eGdWegE5zbyD;{UXEQ)N3{(5*|$vB=XN?XK772 za5ko7e69Y{;Tax3U?;Usah`W=+ngOIfLd4M?0C4GDp9vJHI*McxY%WV?a7lTVJRW) z_u5}mc3of%O*{7QqsldK1y#InQ-dB zGzRlpzWoWY|FN9gpT)2>Z%2n`$!~Oi0tAUbDS>={r%!XRJ_~A`k}n5@*Q~o_b#RI-hyVvj%_6F@TFJaY20 z#g!9Arzgs+{$YhBUl?}BJ|@dwxLln>jB2A)U~5)p-Gei`7W zC8}^3f3maV1_|HJ>L?w!jc}8IeRE9TVsoFSOl(X^9}5G+6Fef?s7M~jku{+&$0=h4+ABQsY>oF!u> z**`*7D(bVtyH>j{y|d7Z6A%y}>VI(k+qZ8{-8Tl$OZgY(y?!lQGC0%0lNC1Z(DqKR z@Pkbc3o)a}%EwacvJ0pWPE+#Wm*>j|K3}wr7M6{eXH{ti@9(u1%8wwCpVc%t#6-v; zdomR{*_L{+_h-K2?b8}mDzr3*rufn9I5xv!YvbX0&%q05bp8h(t zT5oexU1v}~7An>!%IzIm@Jj5!1qItd+HbQ7t0 zdkzJd7&#rC!RhAfTZtds7PTR-VPj*P{J04csjjY$l}-jxUF(qyP2|xun<`{;)W7%W zi0(y<7?S{#OaQj_Tbs4niqTb3&$aY6H~OfNCacm{@7@vSmO78i+ni?(qMujmMlEAL0~z=IRCK_@m55 z6D8~IE%xQ~#J4M5j0!4XB%;96iD`EIw(g1BBlaSIMadNocpSxWlsKEN6Q+N=V(ZbB zFwwwr?9P#-nyp`$t-FQ2H2WE}?v*Z|)WIo(mMiGMfA|j`0jAb--Qn+0t3Wnqgt6mi zn$e2KO<@0opf>n){yYDR3jiCn0vrexl#|0@Kk|t{Q+l5Z8wW?oZQ+ituCB%K#|IyV z?H7LL5v_gRHN%)=d6)S*#x@%qe$%qDuPeTih`sxpcJa?U5c*4Q=Np5ekMoTks`Cc! z>FDT4`1q7)|KFkvhlIhMSkH?2dxP0(J@+uOe0Ye9Hq8~3ng68&=VccYO9jwoK=|4< zsGZ4KSOlOEzxpgAgCtzOclMDR^R0xW_LP+0J)<9o?ExMNR?3yTbB8==getl7+qWOJ zo{sEqc(zn;^#*nKW%em$<=W8Hd*|*A*9z|6v5F> zMY8^?KRsA3xd(s*l4I!d(FBc{rb8tmnv;_gmXg_C2tZhK_~{q1x&72zRj$24_h)r2 zy9dMT^r4;MjoVPtve`r?!^X$=PFfZ_2vFL=_okbjg7xR6q%eaqKlnD1CF}$*m2Yih zn+YQRD`b*gTOfG`5CX$;Tbej#i3pv14NE{u5%>&hX2|OcbW$YwnV_pT=~S};n|2bH zIMTI-{9A-V5)GL%1pu0iHyxU(G$n(TS-f!_>o(7U|K(PH$pwi(Mr#ZicY`^*qLK#9 z$*%f#G7e-_lrJaQx4s;%HR)(dg+oTB@KzTbQLar-rq`cT8>b$rUD&TyJ_YkqB+t^w zh{P5~XY8n@1KeF0*Ir92;4ezC4B;SMyzubwAB%2vxaYMb)IWUe5xm8rrv5B%=>05=4_Cv(HA5@QG(E+>tp$lQzCrNLKu}N@P30m0=MP z)8n4Mi)BRax0CNI4G=|=GE@5aD)+Nc5^Ve|;^Lj9Wa@d>9HG1R3tBb1uq7pBWlkw6W}ENnSSVe6 zeJgP4Dv+!s3z#B;>U$Eu(Z)$-H#*|zIOp&fkQPBeS~5H*qTjvalRR80Lj@(92UMS! zv;)L*cEcXOA7hYRwfwnnoQC9f8{}fvUG$Pi8%-o5!{7^$lujv*=@KHzk-W#N@%1xj zFD7d!p(5yiuQcJ@#>~sh8#L~qY;Vu+=&jN(H_dq@HTR!N zEzYEQH6*+p3kMiZq|D-BQBmicTUu^-Ns5az?2fs{-|3^;f1R3}`(gPej zhJR-V&LFXj=N9Z+0|w@JsM`SR=PJcE0v5*BuZwmIUU^pW4^pG6-t?m*ApA|--x>4|4|6XRzNi-oVD;}xaO|dbWiww*`_-*K^&&+s+6=HNZ|MmfqgS?g>^2Pg5 zOeiWUBCQ*6PmKPOH*V-HP)7NEUn%WXkt%2#4q=e2|HCwKV&_?^28ODkw4ruXWLTQh zfx5uTDhRTae*3=oN|NdanenH^GD%NNk?x`}AP@jIzH=I`b=>S^Hn z;@A};5D(I`#8T3H2LI+|&AuA4psEIAUb+q20r&x^JP}645fH|nDsckv3g}s6hJ+`8 zoEXitYXavsDNYJLif$e;n1!W-RN|nZ5fY++(4b^w#K6JHxd{oB6~L84^?~(2bg1?v z!|N=KeGfCC%YvLi9#r%3$aRR*uU@@EfhEg%c!&V-h*LKF6y%ENPS0{iL8E`A5~|>_ zU2@v$S&J|uc(S(4@yot7?zwXuTwJ*0wVsh#S*(a$-<#HMSvsP&hZv+fy)2Z-A? 
zCm(*hYOPW*^Z=>=I;u|@R}woAI3gqjTi3vVNQ{P}{FP@8VV`n;MfvGA1tNd92Y{XK zt_)K_8DseAAq5neuER5sU6a?~a@0)rB-)OS;62bsv~l4HK$xB@Ls%$Lw*_+8x!dpz zF$TRhQ<5;X5`fIa`dmVcoFoQ6D$zD`a9DjYNVL9nUVN z2tOd?q9hP;oIr}n{--=^PZh%Sv2^m7OYGZe5x9SgBScUFF83^G?DQ%x$CF$T#^upS zm-P?{d`J;}Yf-Re`&!~g`|<@kqmgq@_d@j%9J8J1{1-|wuMG<*f`{xY4(Z7KmwfdeecAns^ zl(TusU@qt)lx|23WSd*B9@924qXgaQz1>zO{?rVvGd-HZy&Em*GOKc&bD%SK}t8ZjIWvWqHF zCLb)Yp2WnITD1^Dj#oBZ$|zz_o&#cud?kBkP)%T;8J^t~4gn=c zUtgcr-Ma)}Vvv)uzhUDDM#yQvaG6>@p0l^3qR>i&8nmQtix7(C#>U1E_d6(&Kf{u8 z_mupXbv=8`zAaFuqX7tYXH9C!r?BH`wcQP)1oVV*?z6$lav2~l^$vzxIgyp0nGZe% z7y9Jz_bDWdP@SFE!lp3IPCGa^Qz6Gb;SPxN~H}Ervd4q;eksWXCgoe6kuhbVQqGOCOC@+9Dtf45VB#7+uI*~TsYk+Sc>W{L3Vv_!*0~5+(IQ6#`6$h>-9qK)GFMH_4#pKyn=O(*pw&-_`Q}8Y5I%_(rfh1Rr~Tce6FW z`mCYC0qMj!JUn(zPMjMzZk#72ZJqe+JLM*BS#E>@ga9!+V6=&yh}>X7eONrCgs|G% zXhRO9tczYzOv3liLsMj9CSE2-GfR5tgC!31vtuEAu(1d8%u{)%ZQeWz*nJtko)y~C zS|LJ#5rpNwXG$?5SU+mA0iPB9@FDMths1N$i!1?qmVnf9U`fhcBTx-o46pFWJWImP zLx?OMu-*JO>e#gCCJ)@utY%?=RzWs#|vXMztPSJl4ht3J3W5`_Z%KJjbXb8l?K2s=?Wc zL=Hg%5+eax-)(&j*%Qc?fk!jbeq#vA=uN2JEhg%`f!rW}TNCZz;%w~ODV@o93q7j_ z$^{v-0>mRqO3JI&T`A|{Nm!+s2nYy}zA?l>#LmaKV(ryaR zA}S>-i+Ukw9&k6RZtjIKLUn*}Wd%fqv$fpjH=K4fNkcmE>Z$}-MQYD_cI_naXiG~= z)YGS7M#T<0&gDk21qHm+G&BT4QWVw0kao+l}f~D?b%gMh6_Jf?9T;jas z%Wi9GYU=A3grnPQ(UAw1OR>VJuQiS_&XR@NNKaSgVMthennMi%6tdg%J#BR6e!jVNiSYlb-uXL-q{(dn%4+zfi&WezPoEJP!d>*q)`As zQ+ls02R=ce^82FmvjP|d2G?b{6#gT_RMwGlXmVjtJrj(fho|neVlA|Fhu+dG8Pu1N zwip*V9N4J16)Mhf_vK&gH*U~Cb^bU0QMnN@NCyG3oSZ(8_uYI1S_Qq6)}p&*NCK90 z*?>bpfYjO9nWU9jCV&Yb*AyvV5n#8Rb@fbZX-|*lWhm(B&;Jj9#`5^D;PM3J{zu*n z?D#Q?(7~H?R*zLFz=g=|dQIpCMHO75T#35@cq{an8{i7>n3~2yD8a?WW#i>lDY%rD zH@J*RPL2eST^{mcK!u@c(hTudkO%{u>Z<)eX%vjcwNG4*{|?>*ZMjNM45XScXz3ai z{}IbT$5ze1PD>}^c^jJ+I*et=m}Bu{IUDJ^6aS1ZF*G%d=81?t2#w)^6?^OVB%WZQPXMXnVnfQHDV7vhBf>gOF z?NB`9`+>z!3S2ePi@iDSMg`mxy}+ZN+q^vLFT?WGl5^g?dDEb3lMxhz+5z%NuNZ>b z0Xt_3;0fDhHI6M_Z~$m4D=YuOY)w|WpIhH2-5hTGI$nt<%Ub+iA>v&J#=*(Udp_r&)E?>^pev9fqsd(E5IlVc^9yH7f2w2JGmH(T1 z6?+%qy0uD!hGzli&t#^{4c9<^(0S+RPiItNCU zJ8{!j*!#*`U4T=TUX)m74Qrdw9RP0v??0N->-_w|lB<9I0De>lv82@SgY5tx%n;yn zDm|@Qss7g4S?azF<*n{lXj_7ffY-{&+PyXVs748YGTXVl!TE1;xTx_RPJ~|7ujOZb z@juGV72^tv&FK~(717HCNK#T!VWDb0R^`DpL%kU7fB27`hX)@bjjx3+*B7Y*WdI5?P5nt(rTao5LCn#Rowj|4ujYWXJpNCUz)kVz&z}kD z>BM;SojB3tj(x9R3>yCHx>wyq-=k9mHgEh$r0QT+LTDp`bwZi>iD@)wfISFuy0 zKI|kiA!U^0d7%7yf7;iX$=2x{MqY6hc||WwXdua(?Blay)0qgeDirbyJclL5r$U2^ zC@9)GyFJd*E#we{n(CTC8dsDR%W(vc1l17gh#w2`>hPG+5gFzi(=q=b z{-Hu!BO?|63R#hu$i|H>k2~Hl2#BvrlSnx^Iyz# z)Xr>zX8*sTba_aIS7=%3JayzrrkGbd)$Na&8~vKt9FP(#37|-K^{PTK@Un)CZrkk6gjv z=-;&L7NOrPvuj}Vam}uvddnGtMUR|s>sykDtnn$m)(?&%mqD9&sR37@2{~HNnp3&$ zSfI@J>J`H({b1N!V+hUp!2@>T*Z&@yw%?B0SYL2~weil@#;^zQ{xnWw=1j!Ow#9ow zQgNj3w}p7oLh zk@HFJj)J6F!0lGK$o%gkNU;Uk{rVS-*4`fjDHk>VSZS68!Ir05S}0UB?)d>;@^@OV zZ0H^&_B<*T(|TPmX=27Csun9En*RP^6SACH>YHXbC?K?jJ(?x?-ch&rc$_akEB#ht zlq4&nh*Ivah%f)~Y?Bz>+8lTE(Y>3FLRH@m$vzGN=XSlO1-D{R?3hNI3%z&KSL6Qp zJL_9ig{kgsU3%q(FJYGJ#LM+M-~M{>c0y~(wqtq#+p1dtn_RXm!dY*yam{F0~FO(>yC{L8v$@Vefw3BI3(k0MKxR=59% z;FbDo%o-yjf#MgUaeus>gYPTvZE2T_>E0&|dmYxzHomjA8<`h=i<{l>#QG+lV#Jtd?a|x<{~SEA94!R?bmKCbpJb$9q!+M*{UkRL|56<>vvf9_SM~8{ngfg-WsKN zZzFd^)68Rq6=>vdc$f?-KPgp^AJ>O&4r2K^*5L(n=e0U}Kd0)cr;UeC*U)rve;?$( zVQ5)iyEHm~q5D%0&T$@zz@z@5(L1r&&n}zvkNtDKU}lo$-l28*%TYNt>pEX+AnPrq zs$XRywiMokUt@*{UineCL&^HC5tX2$@#t+muhki*BVUI>{bOeGePEUXz|8nKfoX(ItfuXT|E4F1 zfR#5*(!B3s=)fN|X2NIc%OZ%sTy9IrKsa8c?A~XMC3dNΝ=iRT3Es+TexIvs%#W zwrV`0==-MEEu2fD=wp{y_|iS_rCrudo6a>#r0x+Z@F;sG5+x%ZeMbD*(snO=`OsRA z3bmzng;!`~X`MmW_EYv9$EIK%?&oPMSH5rm`&HK&hg=}RGlhIDSw>yo?*$sihllR* 
zb_0Y9{YP4A8C3OPi3K`#wtB9MZ%pj>DhgV9Sbs>JhmndoYYAd&Mkuitx;@}K{mR3G z^zN8r76F>`Z^<&Yh}vjKLh?15VkKvp5lw05`MXZrZxuyMq;E0z`@Mrx*P2sy*L{WR z4pRScW}h+=pR&)=_g|QwPl5kGAW92cGFqB9q7cucl+8Vm(luicUe>=Uc#=Zr%_9VH zkpiR1AS4?Dvoxa5Egf9CvXm6fJZ=MjSGXAnF#*Ld+iX3vyB-#jC0z;Ej9Jb3G5lmc zSp^mQ(US4_=cM;yoAw)xB_x8OXUk3woo)iJl=RDx!LLcL`%$cGtQVqz@@ulm6yQRT zqi{^lJW})H?gM9$nw(+Xy}G@Tw7cO~Wa`n_1xZ^66+UVWI{DTMuUo-hDKI_Smme5P zUZXgz;=!7av2_x-Os6#^x2MoU-)pOVpq9Pk=P)wpIs*eqhTmndqv1#Sf!;UcZ-ot4=kx##>1@NM~Ps5^&?gxlbaP~w6@eIkU--hozttEhdF2J>&3PaJv%7UMXiYxK zjwHWtVCk1n^>Ex1Tl}CA=5;+v5$bVMT4`+k9qdgT5ME-keR;Y;>r+H}FnxsLX~ef`pOV;}H-< zGG6HN$r*mYhG2#2{=G(tV$z`dcDvZF81VFhcW+5Dz)ifnqCcB_*I3UWDC<*0%Wq@C z%x|Qm89jBLv@mT+5;vEe3VnsJ4E!Ch`4R)U;+4JjHaKTn9P=3wS(gtBHE13v6>zwE z6LBFNWE_dA)3~U9!wg>DvYb~**?pSqpI6>1BnS98 zZ|I(Df9dyr)mK%JD1lC`=Y8XoRXne)wLSfTU$n%S$FnZgz^{gCbqOi;fgeIq2&?2= z9$2fTDBr5nh$$9vGkUB+{lZwwaN^sKOUc(6uDjccd5BQ4E&J`J1+)L80$LK3zbbk4 zf{I8TYhS@FlMjr(jy5O{jhGZ>Nr=zksH&+^&=0x9M7QfK{yng+&!RF_C1o?Ydx(!g zvEfYyPH8F(=+oU!BBkfcxT-5^QRMd3a4synkw9TCJpM9#ByL&sp-F$#G;&}~zI5V= zl|N^88BM+fEn`vp_sdGJ%vUEjHa7l!9#XrCuE3xCov=UfC5aCeeY6{Ykx;e|Cjk#T0qYqNkx5c&Q0MUETkhYb zbWX$0hpUw{dC6u=30w3WqbeL2He~cB6l-&{T&)QTPdVR1Z5({kW?-+6=>g@20yEkC zJ8>->QBc;vBMRvE6ciME1<0_s&_M942LQn!x^vDY+BB@P)R2Fu;xXgTzCQLO)!s@+ z^QVkANrmtB)#ih(MTt|L$(_UYT16S``+tw%<1N^#K*#Md^6vutKgaUDGE^x3A9#@v zbXj3>+ZRCV0m7&=O!}a{LUFsy>RN5G2SCCG^uAvKN(GcygxCp)M+;waffh7f_O?0& zf%t}-q#DlXSVsY!RGd4$cyTabzyTdrD8HeEZ2^QL?ekk)3_U`dfO!Ng;P6E~dC~}Q z2+(U#U!^XDo^ql;1swYTfBPPEquP6V+P)=Srl6yXtf~?%RpYe%xm;2o2lzt!{g0gv z&R$!yw~mgsdyu{=^vEjgheLq-L=BAr>bFKYofl%2{5Bf!5&RcvpQWcWA;<;HkW?+b zD@=Iy4EwWLJWrM%C3GzcPwGGWl0Esjaw>WqCvmvJk?XoXj zg=w9qOk&SKkM5Nk(o1;oi=-6~+sP}|6&Do|7Y=-bAq1hc?w$4DS%7VV$zh2)PvdTG zZo-@r6+L}B(1qKhsd(y4%2iwe1_H`F1#~Ni?f<|$T^`VJYsV)XE+}wf%(V6N!b?g7 zVbthvk)2OQj*9ZH&%V2i4rQaO;Pt#-_`)RH=xVD&B^49X7|UGrQT;hmrlFPiS0f`v z?@g)^flK}#TsD8eNKK8_(@VOkN>iN;9b&JObvN0;XJ=i#H&+nIB3Dn-RIG#rz-w&H;+1=NZA{RLWKk&!bl)er6M z?L7iS(z*{w)gqHJGP)h>k0W5;!18OnFI;ZidM9d!GN$FH)svn%m&Ji-7vt^C&4xJ_ z?-yxbW@^~w#5s~XTcBtA!H}Pen;RSaAn;|>%C^fo=YDz`iWfb9P7S*eb(uYX_wHSO z!w;{vKlBqCjd2e=~JMTJF0_9EdNF0}-tq;GO^8p}-X-i_|Kp98S3S&{1N zidD8VYt?_ko2v~zEcf7^)0Ubq$k2$F9tKPK%Xkj4bezjS?i)c3OZH*j45Wd1A3 zBYf%7tz6Nc*(Oy^!N`CFty-C#7ZA9RRy%2AC^!eJvf-;~-N-%wpIvt0T)Z>pa8tr_ z?LL6@{R0DlS*u!E<;=e+{nrj^0D*_YD&0&nPP%kN)C*9HY`UIkSq5Mvti7yT>`x8oSMZxVHz) z>xSHT>2GR+pV5^LJ6xg)l>zmISTP$Kq2vR+=I1|?<>R!7R2#NDE5YQ%x(g#;>aaR^~2Q>#m_P`}jw z&;z>0Z;Oh!BqY*+Q~o@_Si;Mf-|odr zojdYhordfVS}eJBb-=qg&#jT(j(D@S5AaqPbgTB;7tYSke#$KI0$4$S4;wK#%E@8D z5bNzs$^D3WsRfVIX$a|k*-z}=96*23*{ zQ49=3qWt{*qhn(3L34~r)F}vT@ApeGb+kBcEc%H`J{=Vm4D#lbmSSM2t)iv}1`C?L zXGoM96-(~VCQ{SUC1NlQ!0br9YKaa1Gz1JXo7LPDTR&cW3qF-qq2C~;-Q-d-ea-bU#uJ=Ry4Ha*!_AjgcxDRC&i?kh9o zhxwe?moFJ$e$dF?Q#>PG+Mgi^c$h!IIYtm)kk`@zI$aE;GCGEa?Xb}X1u`i#jLyJ^ zHksZL&;ol296N-%GCe=93`7eUz(v?vng6nU*Teu5hhZ7X)1z%*MnJRa0(35rJO>LI zcZDosc77hYR5F<(St~0JXn5cwgHpr{*Uy88g}97lOe$(>O>O!6k4}#Ew^6`gD{OH# ztM!NtC!*87d$$7;B5#;vG+IeaN>Wr&IS-?>7kPLhArCXk)^^ew777sPPH>{`^O!<8SL_pjZl`e4xFa^9N`RC}650J-*v- zXb-_~t{>jy=kL1qc;eIA!OZe-8INq?lnL6w5uEwY#bJ99*o1XiDun(|H9aACL{aha z^B@&uT9Qe^<0+8Rfdeyj!M!%KZ*dkFRIiJQo`NX=XSbuXlT%P|{mruNmFCpj++Z70 z{@3pU^#=?FIgwocPQtHd^RjD@@HBNq%XaazZ&xHR3_-vaz{o z+4}SX@de>90n-}#cUOdkA)5$kn%a0uc>>%XsBsc77`glRxgb%(I|&fD2c$lj@IoQk z$@=>5obERk!`DN+KTG=`BxGc$TUfB+lQLCZBqM@}LzGdLG#zF%4bgO+Z(UulAp->2 zlSVcnF9c$Po%`<}Kkocn9aRRtF^KH7=lVn@qb}$uO0sXn-2X2PY@ZvF@4&!72r@uy zY_lqLn*54|f}F+Sosx`My>rbT%4NCr;z>r$C z`2xvxQz9rEPxT}?er`VGLIMKlNWkf_C-e>#fkO>{>)=jJ31smg=;Kk+(X|0@8OGT! 
za&aLH(dn5PEToxiTdr+jpbS*(larIW-3ebXvM9tbDo0bVKcYk>3z~mf6o>PEhYSS% z(X4sMe{k{fulfG7hc+e(2?2|Xi@>N)fYpJz6CEFKGu0SgTrqfE+Z!m1jQ>(0xPf%s z-QAIj;dJRjJU$sq^L=3`J$B72**TaKJI3AeDx;#@2%$$g4;~6g@}&ce_6|uP7|;%4 zL?_$ZAC?86n|%V5tx^?IZ9Hdj7BV>TjO~Ec1VUdI~JEeBYlF`zgOv0Hg>w ze-E0XE&A~x(QmHRRkd2+E+}BTH))z)?5j$F-5r1OFP3cU@K6tpjPwi$2$;R)ZHZN( zg-i)>O|$PUal!YnW{=5ST~PC=P16Oq-k_r+5!PA)mJ&c9u?%P#&87dcFP<2p`B>-S RQsAj|44$rjF6*2UngG+nn~ zrdkC2hh`@t{ptoh+;6<`gTIOG#Z~MTtqkp*bZre#j4Z7z44CZnZ4C@8?ToGLH?LL; zp`hGHkrsRU$~kUr+~rB1!$srvVjJsN{li7`Or__@2B1P)Z|gCQ7tUx8BIsd;g0Ssc=5Dq zaTxw&%=*rBij|U zGqbY_>6p|6*!gN%DCVu{i`~_C$sIQqy(X{Iu%cEk6E@Y>)~aVe*?x3-u%4`#DzCyw zkSv44V!Ji+lfPzKvU7Uw?FbjDwH=#-4tLx}~LsHMafZ$B)jLnBEfDw{G4v zIcu~#9+;ll>T1dnx?S@2`!hUUvhO$dl#Pu;u)G^YDY(T@p2^58cO_k&o<4a(kNGPj z<0dBN5BGl-=M}*^oXHiJ~WXM(`e|FyXA#m;%G_|phRxI=8f2l$<13qT9($;tU_mY6f|Rf5ncBw zo-wSqIkWhZrdpW&_oi)5?*^qmdfaR?GsYKOf)O}`lC%13x%|~%b6E_xx-$5i>C-sM z#E`F>#V1h@okGFDvs`KM_1b8i^3+9fFE0%wy{w2J6dIE;2&$dB1&))F_0rhfdnWnZ zdHdqcf>2*rN(ssu79j;A!&F(iiTjbmy9Iv4&qnDvi}bF#`EG^Kp0LgjI8%za(`DrC zt_dI5_9~95L?{{ByCCl8L&sTUYlwHd`ZrZTqV#-GtNzwLTdtWG)9k|9o3#MqwT>LF z|5kVvpJdeUi5JGKkCS1yV!Fmy^;HyM^PH)lCZo;=6mIl6d88Uih#eT-3bK$jPhsf{ zmY!MnuUs0YM)B>f_thrc8g1-x_EJT~YIlMy3=R9%H6#%~AsBT?zFM2Oxg^nO%`o4O zW#~~7NqSKcWW{4vg@lASBJT$7wm0uH(8YI_d(+1pUNd?saqyv=x-9kk90w1dng%i> zAw%t^cZ1`f$-%bD!G`VSUJqhNc{CcuTK7gAg;b8&%6rckxF(VJ1~Y_+~9y4Y`HHSLCrB93>Sh6e7lDtZxOa z$K~|5#nr~S8%|Z+ zC~Y8y0yf9dYq*>rKON_@e31|CQTBEMh>PV@%x%>k+`1_I<(D%MY2A32ZH?iV{*6TEe>M*?l81t>md=1 z2YJ%gv2)5Whq#MB<=P`8&MUk`p6spf{&=0-4aak`?i;}`2`~cz@uU~>(u;J$S{}dA z7xx8i@iw=nd{~cLXkE-Z(=C^K5K{>%P)Lr;3;+1UFhu5O#mG}n8^Mxq`Xv$I_=gq0 zKQIlyKHHv`orvy>&F=P;h-85KpnWPBG{c?rj=BboSY{cdiO9!J(V{ zh|gs)c&=u`Qzi?GTLzU0_%gS z*uR4NUfJqM^q(8!i$=B6 zBvkVFPi(*I>*FIneMxie2X}HS@^Uh~TzjP2Ucp|4o*P<)A2~{Ns&^fN$A2%=&~biB z`uo8n>QqMo{u_$dZaw0Q9$Fizg|n_3g;%G`Bu=+mj>zFV%$u|ITGWS2!b!mhD?Y&n zdBYV2+^ld7@!TtO!y)>b*-OKMcQ9acxjGauXS>x&xi2pocdigzY`(3z3!c{W0A73T zgB?86!Jv=o?DB$H+lrB^gc8mK)mvg)L_a4$e`%P^x>N@)W!{v@S@sZa!%-pWMMaOI z6Ae2nHGb@Py=czb8z;}KZff|(0yFr=2zN#rt&$x6=J=~=zZ_T(>TiE)Ej%uUJ%52u zQg3$cC_4I72m5<%7Jm2$5p2-w!jUs|xWyar*?vlqtTvA{TCfJkh5oHB2L9%@tOe~! z!kxp*h25IGD@s;O|EwmIyR#8MT1+ofLt-SwQjH+XnvSR;HIHz-XC2pe{8_ZEnSjyZ zEw*nWu;d~I!JwW;abNN4Ts&%nU4<5(Fexq=D85dfHbb1_xpBBs&GLJ$(O|*bni{%` zyM^^kufRs6PQQ22Wr-MCayf7t|F>xZaU`tVvns;orz{m zUgh^5%AnxahDHXg8T&G|w8zMj<&s{g1)=*mEgXtI=5}E&wL{4f>z7)T`z+QA+&+%oVuLBGCAqlHZnTT2Xzkbcl)pYX{j$YGUMGh)u1^J$*J$)q zUq!+(#)aG4we!Nj*!bo@6Axr7_!2DHF0NN!fr}1E!$spu=(K0PyN9?=Kw642n<=?? 
zPq*M6(-fVws$3Nd46wUNvvtSSKyBT5XK)XyIXWI=c&cG-y<7jIhTb=HUB|1!W%{|Rn#D<0sA zyWoH0CGJnq5(WArPqPE4{&cog{e4XC!4cctxpChkWaA+fxG5v-WA>3+dmwnKLZbY9 z%q}Hcv2I^}#FQQbFl_tgb$NmE>eVZht)tlxeQqB69`ch%S&E&^s1<^_gst4+klqCj?hb4pES{?V z=A;;XLC^xA3YBIG=b9vJr{swH3_U&l0~VGL8Rk+n3JMAU^3u}Mx~l_O?Lv<0BZUA* zaR~|0pQx%5KX~u}u=pK(e6i=x@1UZhCV1ZLxx&0W`7R`NfBxNc{C06n2niAP@#5gM zaFaAOF|m2}zlKRrDiXVX9&RNIdfM<1|-blRfWMk~rI{U+Vd=j6=u_t8Dk3d?6ZXH+-Rs$@A}ry_6O?!F-XS&3_v~yeVPRn?q5_%4 zD?W}}HtOX%+wbMK`!;NCS(|f{zJC20?bP#5)MVj{&K-Hddv<*u{FV<7V`)3@k#r3RB^mK=t!|`HL&1Df@)cHYzQ7(S76e6 z0`2t)2wi&Hg!eO@4YqGZLFO_e4=L+%P@X(t~+EEZ2-pOVs2W5SdB>Ugh*yCiG zQ2irQkDIEergRzhTiu^NlxQ$(5sk(_NOd!evOYiA-&-A2sO`0l+S{|&)7R$`U<%61 z%e#Sz*}ql5+0xuxV_JwyO1hfMJtD4<+|*pQmx-m}s)UZRb8GcTC@YXmNpn-?qgDOcD?Ui zp7C1k8wJ*4-4}T`YpmRDvaYVtqj_wE>ge$V*b({zExoj zl0cmkkT*(w+NXuIhN5l!G!T;NRk$GSa+k%m%lxt%k*!mD2tJifdNKD9^VH?8gN=85 zQ1sn@J28XMFtqe#A6SeIZd@-gf`Y5S1D;Bq>PiUG&SI(A=prhF=QRI4mzPfkcNMP8 z|F3@JgN-3ihV|bPxylrmNOH^A4~b#iuuesLuuLQ5=D$zI;RHh_ zt!!`RA(aVa21Hs%1t%dN3Z>DH^IBBQ7Z9dKf%%!a?yK}7M>^+Q#fYvo7hGp!vAZiO zF|qUPcsE+e<1sNYG0GW4@Dlse+a~%Q>v+$Mzgww$z-?Aw~yc=A07hWhUH_k_DPdJ+o z{nmwT`~9XFSAmN`FG@7Y#?JBXHR}EU_Dzc%BPcpLdT)I+@Wk@+p1FPR0^_)3|bwBPN11 zPtuQ+=_|vQfH74gsH-Rp)JL%w4v=!bM5c&z03*1II`{WPr)qP{4=S z^c(Jwk+uJni$zf>cVzPR_O@Re3N7C#YejDJmyDz^R1YWx_n}Z3A)rtYw6VA6++Ap& z4PdF#X$&SCT6ZjS+U#%`)RZ(fPD3s;c6U+iOK5B>f>i$O$FoHTW=jD)>2qP^ymEAO z^f^4dZqxISzs-;%mfPa*;NW1G+$W-qPpD*3yVA_CN#pqk ziiXdnE}C@kJl+F6KLL>*eL=vqj53q@mdvoxJ!8 zIqKgf-LnjtALn1aR8_2lLt=n+(D;D$Mv^Ovd z{*`h~(l%zs?%);lj~?lbmDmUzj@dO*0NmryjJ3A}c zX<-;2{wS4Ur}pwrH<>#OeC0!r1Y08RQPm7f#<}#aQ*XP+ZlnP`Hsaz zFRVYwAwnZ_#|EumU4EJ15to~LicrJc#N07xx}SsDxmXw}r*@8AC_!p%G) z8yQnS{7wK!ze!&($f|PxYifd8%ImV7F)1yraG9slg2L;WbAfX@{|Z_@OY&6{Sy|bK zY-~8?;~g90<-?V3JXpTT+a*$NcsMh)s)C{@M=vXpM9~75VFLhVl;GmCx3^atoai`W zA{+lWBABeOIk!Mq`F5brc<`;S7b;mhn(y=WEegsl3=C-ng~b+z9D&T@ z?Y&-w!&k(VT1G(kqMbyiXWQku>(j&Cvi>WU4Z&SK&$b&;U@P|h>;ec`47X?MgC@@tKJc_5b7qTXW>uK@_7&dD301vxizq};*OyvHu z+b<}Ha26f8%79l0{R!{EBIVB9?gT$WtbA{CGEqMMG3xWzuj^YP7>FO+r}4+$HR*@o zbl*dP3j#qV(o@lD%}TQQ`|)OwR|2mM<;|Nn1y0xU0Yf&-DhO<&ogQvF8Rb)EjX(m} znQ1J=5AO@}Ksh1S{0v1ro-Bx1mLUa`7qwkPVLu$$YEb@d?#7)P z>Iqb0cq{r6!3WqUGRWWjj%!T`f^N=BNxnq9*7ub%A^;CoU0%3B$tBpoc2m8~o}QYT zT41M%3KH0a>nf8{mRh|xp_X%9;oZA;Q3NiIW~16MF*>n5?((TBoO|#@mvZU6?|GzB zoQ#_`H<|J2Xfvd~jU?9G-X9=F!l>=yM)_ugUhe0wUmGSb4tOSHSN8HfN=iKea2qY_nrjui9sG={c^?5YfSYQTJI(_JH_;%{=ZtI> zhms&EDJiHCN(6ca2D6=$ogLeISm^mioj<>SfB*3#T6X2ZNG&;wEoGJ3|aU^U&n zh+L-UfVP^tku(j(m30T-Vppl15t66$^z=x4oh#q9&i|dZzQ&K;XOY%ZroK~CO5 zY!vtO^5Ps-CGX8m3iqvd%KLH8R`X7I4-ES*AR^6R!f6D2`V=ML!VWDk6xl?fcrt!R zR=Wk+1kV$bj>oHv3S1szvNU)E0oQzMyPeyX?YT>DT>mB$$!rgG`1a{)PIZE6zTpdX z^?>T?OPoRFSFb+D#*WU}4X-=?l#Lc~bv>c4x@FRCx*Buwc1H%wH^5t_M=Gdz%E;o~ z_qx@-fq^fvu^I8SDS)8JAgJRL5Rhnoa`rztafPcwg)EnF2mp+YZ99BqOU2k7>i_1J z7D^$Zgt)kSD6R)g1h)AO>-(PHR+PC>z5T5UVT#|Hue729gU)jp@! 
zFKxN@bLtzo1v8-zZvay*IhqMjn5ULv7-SQOoM|aD%yCPj1WTNe;^Oz1t~n zaoQNK=q7YFCo1hB9)A0FAE|sp6B8An?9|oOMas?@sGk5ibtQ?ShZIidc7gj#lsT+6 z1QK(U0BU*XgVJAYrCnq>zLIXJFl3S%-%vd1DPwEPj^+EIKSSBBf!GK|Cx3XT)DCba zJqkV{A>8C7t7#wI+4;F@kvTEIA4$#Ic(}N>eU28~q_I3!Gy(!+=Pb|Eg6vL|@~95~ zu;nb1FNW0X(LiF0%`+Pc*Q{07#>XQzHF!Sx{Q5rTd_|S+M>I9HG*c53hy%2AbW*}B zDKB2Uz=6`)RY58lY%o(PLvj)iz%LYrb@laRMUu(2)AxmiCo_uDOZR$Yj@R-#odL5# zA2JTo77#&bZ?B?bZFNghY&TJb+_`h-&;MH?X}ZPmd0=fohJU+0Mi1*Nm!PZ#klcIm z{(EaAQZ}VQn?>Qa=k_Fy7Zu@r-z754{fvhQiw_-P-ie(GgGttD+02O5N~w;3%9(r+bet_Y!(_}=YNhWIDqZmi@4l_x+&*j~4Cmex z@vQz4NKqUKcu`=@{jlV1qHnG!U%uoGC5nBKA|UT~fHU4PW5OX3 zjWL|7kIo$%1pHXw^LdW{(E7cqwN?eDJN?6lWdN*rpK&nF~{6CD}Bi>SG6x&CMw;Imy$yS`V0Kd zM4+8fP0_EJLrDVdA6gL}oyeTp1jzmWR6ze}SuO*# z&5jNhOFyr&KLSjnn)ssTB=FXP`bM{uC6&kC4Gp_PINvBoC~ns)-rYAGmpS^P2J8q3i@1uvWyw~ny*!z^>WTW9sp)Bs zTh$KKZx&uFDlR^J*d=0Ge4m+usN0t1xtQ2k!+MfUU)l>4BzvU%{K>BK=WT->O_+aF zR8)h<9q~FhXlXz(-f*0b=@d0&_pONSiDg^nw}}nC)s06*8`*jjU(P>XS0jq*>B4Zi z$SK-Cwv-`OII|#}?+>OLIX>;V+Ezj9pRM+min8Zdi6Ce2Jw9CHAuOG)Z%=QNJ?m-m!nfuppAqok@XN!T$(hg>wBu}}S^wn%94hmWmej!uHw z=iWCuXiY=v?QPh$&IaxwU)_N|u>B-ZbZ4QNHZkSfx3_@4A=Gp`k69MyTTKcz1d%SL z{}iulKl5nUaCYS-OqlqspmR_a(|~f^+wp7-OL?|PXp>p+zyb$#z?{KI!ISAG+eHGi zChSoi7!8!SveE3`F)=YTolqSWXi3|kKCHjC<(m|rmTk7=tpOdzPjUwM5j-9;b=spI zPk>}e;o2VZF}JEd=jP(#dPqxq6_`;c2-4S$9|TIN<1QI3@ECpMFnDw2(j%KfIRVc_ zBP1isiQ5Yr9L|_v{h67J%vDEvT5juHDCC|;xoeDoPMcW=zpZ6*bx!m}r3h=Z z?3;xFR$JG{(WP=MU!j+b8`?wx_FAmjhhA5uC`k^jVwYOaV~hS+fxku^jQVF zsipibI?ja95Dr6q{mHLsd#h=ypkD96qeq|4PY)>^5+!gLCz3H8owqS%kfGRN!Kw%o3CR$g)eD%DNoH2yHT? z{^WiBvUB&)ftL4=9-^Gs!GIhHL^-MsaOl7lP5bQv`oDr1`=R>m3ASNHv9Cfk(;Fz$ zEMs%y`DVW?-gI(~=(B>9&wxS%fr7AQ)VMuCf- zGHVP13?9XLX9`aqU5=ym;Vyraqa=?HmLF>wXQ(5vhL~On)vLtBitPl3IgM(0OCc%uAS>&ymg)p((u^tuYmSKUw^*~P$MSu022%@=J53>-E)UE zWnIh&t_RQ~ew5KmZ++f}*T`E-|IYj;<_C7eDE@=jE{Az1dGz2#ojT)k^} zG_qT-dMSIZe?&jsI-sgN+C3X*)Kvbv6Z9j{MHfAPLxDy>MO2?M!Mei9*!AH2URnM1 zDKjpdDKl`!X)~_bRsFw%jUW3P19J+eGbTH=y*xS&2?vi(qP7nARfLsd_lFO?1ui=) zp+W6sQ*iI?3*|cN%l-KAk%UIios;9^?m{zV=8~!R4f?<{2vxf7Q?jsh0wE?NOE3KKy#@ME0>=!y#h80O4NbD)CXv8`5+t+sGM3-yf$&no;W0>?V~-^ZDHZc$f@Pf|Jof2 zNql^KAP6!4)OXrR|6IK3%t7jWep~V8?{^oH7v+qqihbtm0o4pU6J5^D z)RFgW))r!|gl2|z=39?zDK4fueu}3AA!HQv>IIHkjxO9BhwAp!5jw2yfYCufcW%bXAh3^;2gMFi# zeWO^_3%?gnk7JSxU#QOVCdFi!k#SZsrp=$Tzx_JMNs(E+p8j{*UYD)hXpuye=KB2H z98q_sUx96^6Sm=WA3k|hWmw$R!ZU+u@`Jg-ovF?8Z{rnW@^YZcVDX509Rd zd`|kB1d5k7$3+!N!=dpAStC}Y@B4Jxt7}P(@JDe|&cs5>gri2~M^AraPVAk83W15i+#qb=4|C=4lt<(z7o1MCvz~+Ap^*;k zWt=!UidySBV{5WKj;~Rz&!^~UCUHZC<7cCKD47K_u0|&GzoRHp{%z3cDNDoAJpQ<+ zT9UQKQtfO<23Sw%=9!%BmM$i&cBB&E@cks$yn^JaQ|*q29K}A7LY72%o;wF<=j`~$ zsvYmt5M!7jy=}4K7O^9qSj%_{ajG@E|@7o2rnBY#jdJ30naq|0{IR@Z52%5J$bToo8FuC=M zQe9Tki(Mk|I^&BfL&G3B!;VxJ#v?1CRmWG>qk-#f9$S~RM*#&q2(w&Jy$P&24w>cr zJbHnStbClhXz~KvCqEw6Xq~aG4*QVZ#pP>mO?=N=o0gY@)cySm2$>1njvq!|lOcWV z@Dlfm%W+7v*hf>dM|?K0Cyo3exdjD%@$H}dIG2CfQ9;&h{E)+GuIB!KWip(QjAFj% zJ;T~pn8qrs+`9=6-LuplFwr*Z7vp5+0%6TvJBpK)Qm0ZtoE?ebv8>5J$wXHbCcG&xH$AQFv;$CdLL)D69DM~zw3$LDE)Olh-sNTw6oD6~ zwNkJ?_4Mcwa>)#Fh5kKM%-em63aG4>W4z>UYiv-u6LS~_LOR4Td1?I3dyryd_^8!x zIi!B<{46+evyh>OJ99#<0}Py=t`O&*j_$?>L42Zm3qXT1&z>z!Rtqo2JCPFvc!SE- zrdynl&3fvI(CHcnf(C702@&8So!N^?$A&lW3p8H`#wEyuDZ%aX8EmpJ=$ysLny$t} z)umrEHxW0^0PrFOb6drDtuGzVY%rS|R1SWAH-t`>sGy76jJ)6fgIfyt3Q!xO3IXju&+K#uMG`3*@fZWPl9{2$r zJI`TNsYCeET`5Pa{b+j@uwIgOwI}k;b0r$?hP+n7JprIzi{Qd)s0wdRodmMyYtv02 zgvk#k$R6K+4=@miB&~g|{v4?vzN%X${ru?z{q&8BUHMM|0mCIWRG`TSq!6BfRO@%I z7-TX8@^ZRoaX#wU^4cZ7g6O8im;71mcQ=)vn;MN|EWg>zsekjHZLrQ}@O7?4B(+7< zKX^Wq;I?6McCp<^(P%y}@tkr-^Ls6+rQi_+bL3Q@4%X>>r)D@7|FJs9)6n0krNfDJ 
zcGkTmBVNTrR<@eKKcMqAPZ^`h?AQgK4X7hz<9I`F;?dl5bRQ`&8FQNSKq&Tcxlev^ zxR761gu6b(viM)08vQS1&)CM+MEKt`Ic|8F;9UJ>$CP%uMyM^eOXl4RwU6qe`ttHv zAk#L0epKJ!AOy2{+tAR^nmT_x$neF${NnH#7~-8RH)P9_MZX<3r-Y~y+S>~vFgCz~ z5>{SSiPBbYbo0xl{EG6dO+Q%%(K9swHGFY)3~0}0`9~x_XCV81h=lH|a>#Q|?zp0A37Q~bDG`3Au5J0NBP03h5x z-t4ALW-L$*gq2xQME-;PFMruFbpwndl=(}Xl&&KYoR+YWh=9s@yK31GPrzAJ*|PLu4AgeXGGUHQS%N_@ATS5jeQ0R++%dFjL7lPO zmmY@f__m*Dz->U=C^|BZ{W$M%bn1V;yx5EJ^bc@5FUbEgYG_WI^E=xncYsNlWdN7$ zy}BETFD7Y&Sx#jG2yU9L5o$aEhU(_++bLybF~AE~oo?_0>kIP-(f_`lC?mwbdMu*p zR{{GYvwM=2%&;*?&x31mS*Y4}|oGfi(Z(Ko_t3ku|iP zRe&jkZjaec%+n9oZF=Q+{_RXi3w@fPp^m^CJ0)o0#pNjrwd-F*9;587kTk8?G>iGD zVP-$8YftJn=jJeI!Dp`Ads%g|!f>!LF7x{J-HVG0pn1I?i27JhdlTyQWD$s^+@;Sk z6=J;-u0+OX&tzM%VlGeaIjdOK>*D2_;xG z6Rg56^He&wjSNHT#4`qd=}AS*KNh0=Y#UCRx3izkS(BFsee;u25AABE2)n6~t*=c; zFt*o$L)rKRdV9iE;ZL8YW9ARng%#6U_m{>j+@Fx72oM*GnKt6GTR(o$u65YlLP_AI zJapx*w0f?q8J|;|RO+`r3G27srPv40b?|eL$rIaqG-E>%@92{x1t;0s2xG^aIW{+@ zbBmo`7}*u+IP{R5@2*~HsLl&pc}yFhe5S%$%7aKG%NRqGd$tPvnrv=tac~^gkY^sH zhs&u(z$C%o7gUH3r&4+1vQ$y<1MuEQ=HMBi3gbpj4rpG4#*9s5n$fY#?vw ztYVofs!j=0d~RfIpclR;*pH-_gA27xAsWNr-k{#m)o4Mi1|rtjhbO|7j| zbaZ}*VFD%1?XteopP^c6OO0SW2?^W&*UH`bMhYl#^0Z;J3Jv=KWS&%1RMDI!F9RyD za7^M6C3HxmV${K*Amixszmk*pz(4MGj>n2=geQW)9({<6IYH~U?`012YY~ZSmtJTlSc*i>N{P zOz!DG3k_$qsgs%)vQ^aN`KlONDGp}4{y=ejtf&xRE}g@5cX#&-38AH?_Rdl-CbzM$ zv0fFP;!%1gixL<2{}=Hh)A<+h>hkUH#)#rH>W20&Gco}Io&Z&f zjr!?6YX|ku$|{W%KT~%B5dla&`j>XT{h_lEiQP0|WMHTr93=F*yqwhGVPwo?yK&J2 zZ2;_3A%6bYsVSZB$;qg6EG&)F)`&C&kiL|Z)YOPs4mIcAl9T@(oZ~_*&8x?acplC( z@#C3euUbx9Zw-aNu5V+Lt`j>V(M*U-Npw`&*~;Lj&vBgEG+QWjE9Xed@f4OC)wMmh zeXm6t9v$5bEudCK1;KvjwcSo(WM9;wZJ!pxz{0W!GQ#*lw14T>oLE-#$7>8E z7i9csH555-najKjoPim<0fe~1GT(^1LUO-pJ@nojp*;pF$8$$H=%FBXp#janpm#XD zdjBLYonQ`WSy_Uaw7Yk_yu3hlGWdp6TwFZx9^V(1s-qb(?Joto2FwhqN*r6xQC(?Cx^0@7?4CU>_Hfn{n z6%Xjc$t~f(|GJ0bn;0yLm%C(MU#-le zHaJVPi8wZ;{|>Gk&tGv>Xv&FhH*7P^hyIg==YzJS&&BS?NKdr1wA6gE>iqYE#hDpB zl$Dhg=hduYDLJ`$=&A_KlmZ}Bt#D?Gh=_nLG1LJP?(P)?wSGLGOnLy} zpxbNH&NbMTrOt!$l|@@#R`#~2FP1@j4AxaUGjn*1&+8 zrE(7!x-p=;)$a(v$KiWXtW91U-CQHEBU09;|Cx)6`}pM+zvARX|4{D`3;kQd zhJzGQsq&-D%|Zz3Mvxa?4f5T@`n}ad5ynTwS5m*)1ieyD&hTI|{vzkmnRr`UTQGyV z2G@;*g%wFcoNW;@5qDzp3j4&Dwe0;Qr@UF-gap6-OGCa|7ppzJeCT%Wy&!Fpso#Qw&1(zm{b@lK=PI?JfZGG=x? zhp#w*6CXY`%RPyT;`_<(q&Z)D?K)-FyMG_$V5JU+aW>KFjT+uKW*Ag37>kR4HNYF= z^_z1Q!S$Y&==Jm5$ZQHMnJ|&q(q5I)M-f@FH;i#qa3#@#IechF%jjt#+V10qh7EgJ zfztZsW&7fEDL-_cr2oUPP+A2t%hWM|uF|jIC6t=Te8apj4IasyDDYP>-Bp9$dMl7Q zt3Q`tL*Tf>5?WQht=9mg=>A9F1~JRknf?2;Ei`GiVN9^ASpANpZvf1@wAR|?r)BqB z?{BAN+E{O9-UeWKqc?FeKlg(*^uRP(Ff;J@S5XBH5Q^AhOxg|v`fTz<5A z%7^xwb&(egv4%CDyurWZ&Altl(fRTb1FgS{56>PK+_3+9$(;R-E z&1vBJ+3KlCvJA&ZXg$uXs#csId!k%F#F$-|bj86j5x*nAysWisJr73d5NVDc5=^X~mF>#=|Tpo5Pzc zfV>{U+(-6#;n+zVH-OHer5*u8XijWIh024q!2WA_mC!23lbV1zX2EzIT-6R{aVOj| zu>tV`yX~^Jt|<$pvuk-vMEbb^Dr3?EidTo)AdK3Vtr{Bl!!7NSdBs|8`gr=tfxVod0M! 
z;Prft93RA>ka5Nz@dYEUw6lwEb}l*(C&~YZ1Tbbpb{NK};AYEVa1p_QLL(v2E(n`w zS{TN*{au(hz4HDgX|kcE(f)v~EKSK~8SjOyGNlqlmNSkGh^ldL9&;b1aJK_?7`?%06wkkBSJgFUiA>x_=jxA)X1V^Y)gE!4i!Py+v7(01e@vRXb75Ke(5J)5=Ohw6B$BbuL% zu8>$m^(utPu+6oFdGS(SO>4)TI|~Z(4`}`lBK-QlDSMNb->SW%^}j^%erHgk@To~y zhu;o(gZwT4p1PAV8^rX?XAB$#*)1gK`r0jX(DXtn@wCQAW(B`ZUKTnn+Pv;RrfAeY z%xek8@MqkZy22b7bY!OSMXC5@zRp_e *eL3AE7Pd4Dq^)s%Eu0($^#%>>$#GBF z`h5LOV)D|`N<~;7O^M@|SZ{7(Q;a?A!x)(VT9p_g86tV;hIQ!Pfm32t)yj7wg=h2d z{G-2L6PKW~+Z}1IjVHSe8&S^~TxNo5E4HPX321LCFDB3%^|hi8Is8UTM!ne6A?g$w z!%cp{kRpB$CcebP);`_d)te8L-&-bm+kZ>?vV+z}9?M7$lSd=or6By$!$g?&R?C(> z5Bnzey0jmL>F?Qqu{GpuQsmu9N?8xLi_Rwfo@;2{UgNh-IEo&2ubiCQ(obDugw*pP ziun}|{#1ior~Nl&BVk_(Wiiihh`Mf{eJ5Dm`EXh4{gpAgN9k{NDnuDi7_lXTlvpkp zLzZQN^|A|ka(@s~2#kgvBn92GW5p?bgdA&naYt5#Dqk|*CEk2U{jJT1UY&KNeE1Qs zF3cNQmDQrF=I8>#s?onW5qeF;lSu0e7bmV0QNDCS60NP~S%qfwrLW_euF4IG2ZAvoWI4OU=3*;rtV#(jz> z%+nFm_OPQ_clpFUbC6HNUnNXdeiq0(c`3>py#=Idbyb zR)W@Zv>Q91zD3>4E}C=m&d;6`Oy$kTDu2@=Sp;`SQbRGUgzwh;8KS}Zr6#r_ikyrm z!Mm#LieoW18Z47{-6-0dJUGG}v6=cm98OLYiV~)`2eEA~^zJ^86I2n(x;j2`%v)lL zlg^-QM0lT)k_YPpDlYnUyxs|4BLc5RuYmTzc#qG`qM}6IIm8*g^7)I;ZyL!(l$4Vg zvs$%F z6L=j65;FWUT{~Q#-gU3o4_x2ZEao;`pvtm#=th$W)h5AIfVneK&TYHd^;AkCp8G^s z8`R!yH~Q>bwq(`~+zO%pvV(>zm0nETP)$$Why*jpg5U7}<1w z8vTI*Cp1w6*^_&}eg$_DW|kc+75ec;Bm2wy3U9Iulb;8pYZg`9vl&@#UDEq2ej3>SE>&{DhT0b2_ch8drqW;T zY|QznLp|!y*HQkLtk}x4L!LW0q1U@pWl%iI(4JcsS#P*gTUtt7afl@g)y6z808#me z9cvC;{H2XGcCnNMP5~xRN>V!h_hTG>$W#bC^!}~E&u{2|{btAiKQCu@Fi#DpR8+(j znhn}c{Xx~x&;Z`g5V)k0vK4k1|7i-66Aaq_{N;-XDA?c^H0DFR&Zs1V?@edqRM|SJ zA}hyA?NY%;#yFOH(OFnnKz+W{pFs#ubTG?+)VyvQpJcVkv`~*d8d}Wxh1bx~tT)E^ zXqAuw^Bz`C;+$Y;y+9R56eK)np4{y>Y+ki87zcXjU!xrff@V$>rNaM&(RmQJo z7qfA^Ht&Hp*4XvftUR1Z%mNd_31xuJk(={&J)u{R8y)9BXpPRF$;m{hfd2md`&On> zBobTsa>&YamlTE)`=3)IzX`z`xWG9e3Zow|=QTf59}p+(l>pRhFX--#B9U8~vA&+s zZ|dNPQ%x{GKaWhy+~c)IN$!?{&wRL!`4>iLhJS#--j6pNu{z|$r%hlnIJy7pS6kNp z`?VIFa>Q6+|Cq;9yKi>H@zus~nKiYy*N=|I!gwY!ytS5B=g&xh(fdwlab$H9S1LD@7XbI@UdyWI>x6 z3QX!+LIY{S0~p%WhcO7CbaP?a0(8AFOX=WfGV>6`#VE*dM3Llct&65-rF>_z6qkQ` zd+`Vf2}!t2U+IrtoSzOCS&)VZozMl5^1y^5mpdrb5k<26FC~({w2AJ2lSF-uyT4zf z2}>Vv3~LqxDhT=QlC1XH+GJFQaG$TB5`6aT7U+4P3J*(2=zyNN3pDIa`qDy0Iedg7 zNL0Xj;WnXlG`+I&%-NY26on5N7+T>Z@Z$NMu(Y+c-90?E4-OheN6VFrKa)-9`gjlj^Va2 zJ*}KxU(YfcoSXu6J}i|2RLw*|60Z9z;!wPT1z>d&ophM$EOx}#j*pWmC@83_@6Xg2AkD`_5rffc?L2*w~1}r8(*~qH8wUb{u+e>zow?9Xb{+9l`N#(2T>{Jiv-@9 z!YbOS9;23ycz&dDqot*_w=o_|4%0%<9cFIBXlnNM2{1Rcjg7t_td&m?Snh(O8~W2` zXqTlc3C3w)@FVQe7wz{jQ2>>&VXSX>TwEKdLZVo8--j`TY2V5p4y@7nU778P8!>F| zs3g6Efrz57T)k@2|LZRFBIdO(PY7W(%4=C)Nkd?eb$_M*HYxXQIA&j?+ujY=7Zw(j z%y^sia+h9w2JJxGU0Vl-dcE8sE#*t{?&ovu>!2eqI`;xu z7pOkp7#T5ePN2cB>>xBm_38NL613N*jslbTf-o8H-$wxfn2AYxR#r)gO6_N!QkW}( zn>2CL6b#i=I1V?3g+Z_w)`HdOvl4CX?>p$do{Lu1yeXabi8 zJ4$EMSYP_LLqI0pz`)Q4AA9xswarQ&J=Q&5ol}OVmhvz$fHX*V7dv5KRNB<^5e)yT z-KOI1?(RmIi}&xZJYrywmXS#YReDKjDPK9K4KAECx5Y>u408iH036vDuv~D|IL#Q| z=b%;xEfk`?0*@FRT38Cy{k@{_<@({Q8y)ahAt2*_T+V1i8BqThND3G;!3M$+xEOZ( zrRPl6CDIl{zuy5#3FF(E$%3GeqC^eI%p;XEkqFfW9MXQU_SzTDvwk|_M8YXupW?I(d ze#X+}`W9#ql zpFNGh@&q=h!cM+8y=HcHR^jkHkphXsq5k0AZ_v@Ho9-dvv-ATGkAP8!D_5WkArWR9 zLRojNuW~u4=Yp}Yr3+|~tpx1wi9A#o^`a&#zTeZqX0$Jzfs3@q7UjMP3^$)1hpPlWC_(poR{^4KmnunQ5 z%g7VHD&=1iLQ$UB%m%50$fd!YU1xm$^Gjt5Bi?*If4ovo*WS@_FsH-P>pTF3g{7A> z=6b~+hd30Hb#vW>x9;D!v9gi@i&=q5==_@*kD*cT9UJTE6>@J7d4ZN~qNqGvTB;41 z09{4{cE{~~u`7@1txj}&a1E+LSH1C%!(0bvXYf4Z1lM`z2Z5)LuG#IMmCdeyun&0$ zk4+_Hb+e`UbC|RfTBY17o#(Bs2}onzA(MY2ls2UP;hc|_UMB#$8rDM>0>XbPn%MXx zC84r9S}jzBX5hw+8&iDxtBEcZ5C3{}RM7(Ig~Q-#>FQFkt^ity7myxcc^n1J?8rwp z3An__f`Xl0MFDR?(tl(fuf_A=2fo(;m&cq!@*^UBKjsnIMn-o)AIm%Sy9$EMe&lDO 
zh6G!hGLOmdNxTV>T_Ch~6nKj<)xUD}YVqr5_rB9wspGsKN^*ES#vYRp0klO-zz)d( z1lkMo3K-qfLdN$%*Nz^0x{}90T~bZNa=F!JAx1dvWxb%fzVZ>O!W3`C#ktO?G^H(K zta9tlo&C5FWQbF+QxN|UT7;4p@dyC!$mv^-6{)bLYp61PF7KI05)92hd&S57EOt5fle!8G+ zgPvZF)$bH=8Nj=Wf{dPMhTxH8h#?R}A_`7|0`|V$JsD8hS(_9Lg2QzB1f<1)C=qFC z+Tc1LK_|4!d&)J?n^+#hEjk2x$~@ zC1Rfly%Qq^Jrm%f9QE_5!hB=DwRJ^fV>}{!TvF2S*a=|dEBPF}lEKEbNnAcj6F^PS z3cEo|2(5%7(h+2!;n;#eF}*M?3IB{n=Nomh7z_q@9=n`$k0t0F{hUfoLsUg~ejGC) z=&I!bJS%>&j`xf;2cTSS9%=%zJp?&!1}eEnwuFpVe0GRVhgp_ioau^g_ILw8H89>M zz@9*7p7yv9o=yxM+ds0t=luw?&kI`$rL=He?Dp)D!u6fR7@_=;dc*D8UoI|;g<(a> z9hEVjyO>5K;_~ueZQ3MDrBVmP$zrmd5ZXKZcN}JY`T@bf^)9;Oh+Ip$ep?FN1%U=4w`7sLgM>&Fj+ zPrZ9+@h_eOeD1fk9e~Ynl`T1|o*3ZvtmY7MM76ai5K4gQ&$b>7zyb+-)*TZ}XZwXW z&b@vvYaB2d(mc8H=4R3ik3Q3qwFZtYvmO*AsQySmfHj=yF5U%*0+-44Y~PN$!fjYJ zYgD!n{47Vv-XTTT(ASTLjS#e27P_FPat&JmS6ouwOm7R8;b0Dy$xO{P7SXALbZ%+3 zSv7PdQ1@})`vG9{x;eq=1{xZbEj5NvEaD(zVq@j-^inCH+2pIYpsX98sL^#mp3HDa z1G3^cbS1O~@L2XeA9Y)CArvR^_z!ZcHh%r}6E;;rq2#!0mRWwgXEg!>j;*&L=&?%~ zEwX}$VzlCA`Co?UGO(Ehzrj}xJ2+*!nQ01#SdG6bj1^BRqGBP)4X$>*sp$|iuy1*x z|Af{5MDw1d_Gei)aS_}%g28mKsrK>z`7j3p{!JpZ<^ z#gNWLcE?k7Y$*UF%VV|bw0(Y&r!+s%K|5os(T+CK$TZ#mWI?EZKQ!D$A&}K|?jN z!nH^-ft!+ybq(F!-To}jXi%2f+ZybW*@i)P^$j&K#;^O}r+C=Dg_)j$5r>Ox4r|5D zkM)V*?yo?;fFnwoUyd7^o2?QGF=Avm+1c|@Y{iguKW%MCm|+WkozB9oPWkpwU>5n0 zpzys=&>ajmWbRYX_FKRrhy{yzO01Zj$Bv)ys1!WN^@IdQ_Dt63-R#lSWum&e zQ}14;UR5#tGv6TKJyP#-ohdJu(6Z_wW&>`eWpJJ4FIt}>l|&*LN2JCk&Ru>(UjoRj zmY0FUNjnc&zbXpQ<(5pVV4!~r?^aGtZK$gHf!r@b2+3XV`-R~)6}1TM02xGAL&Dje zZEjdUp{1|?1*{Q?oXatI!WiRU!~};!zX8{7|H8XlhFLW^$sqRo#Z^-U|t95jgRX`KCc9 zQNH^rCoj&z*v-(V1u3LUSumku4XfB*2`;kKDMvy_5q;wSA7}0i|=HKxC?(4<;D2F4 zsk&TLw3}cluojRyQ136dKx+hMx&bHv8?d(P>!Us5I#7F;H&Aj^7vuYtTaH*bJFk<8 z42O624iDD=pbX+FvKJI&q{Ze-CWHe50V}G+FV?JiVWV4OdE&%Z_;`X#=v}}q=>+<+ zg46X8+CK+AXh{?l<^(7VIW4_G7ejGCnh^ku#t&Z zt?=3|(Wn9`#QNKBHPqF^Nv1s{bqS?k-Z6npcjz3uGp;nI341C3qIA>cS~M*<2AeJL zd4}7DfWSaLJ5`J##A8`CqCXT-B5A4#$RrbAkcp*;8-;s-g;$eUq(djWR}2k#EgI=0 zF6i0hGa-{+jyc)p&m=gVt79d#PYE0hof(P(O@U?7P2CIk@)!-&C8g6M>u z!IujjC{05c4EAka_d5i!Lz*hehEG#B(!8IWS${lh8ca^E=;Qh+VC|Fz6J`iTedQ-5 z4GHWi4t&GjAjD)AJ+WA#mG@vNRpChvCBnV+NMy59D6 zUS8Wx@71&?Q{TS5e8wcp8QUW2f6$vXdz<`Dg3wowK?TA19=6k+mlKx$zT-vr_GEBH zRaH{jUmU8ig=3ZDRtPWuF#}zNs)yY%d~dK=Y`tjhgZ4Mo)zw^@Fb>U_V5=YA&f%5F zmHC%!ep_{`SW{D8?G(wOL|#qzL52NC)n9Fj_hMruw4QEOP1y9ONI6f}Qi(S1#OmJq zYMp4l;fA(%baag2s=^YC3Yl^B{$cSL-J8GK1WOvr!RJKWn94s2>`fGPadFZ85(kgT zEogOdo9MVKYSB0#xBmCfL2rVv&(d3U3*X(l(7zU@Pv2d6Wl!3v=`6gL86lJEN6E;Q zFS?4P0judg1E)V+{Ex0eS{b)a_M6U5NSVxB!81-SYR0hm{hObe>peU2Khs5jty-ax zo^~BpWjZ;WXqtT(s||hc=FNCqKDL_Se+?3;|M9-_n$HGP)4%qm?pP*atYjvm&aH!o zkVZ^fx1YOvD21-#i6m5HQr3c~c>EIsJ>4BM-i&hoxN8JWo^QJ5Uo$z|#}I} zc{*wJuJbL$8K*yW{lVH{eh0ml&b9NQ(9ZFn!GFci;X2WGUd01VMN9W)QYNkR zo8DW~b#%A>egAZ{<+D-NNsltyVrpm5*e(+l}mdD(8cWlophU=S4)5*bVh6~%Q z@BR$r`pv0k_vu)1!`8F5i|S@Rn^nf(p!PrQD;&L6+;05yti*pncC}U<3ygy(09Z%O zde>Lm3>pYr!S*Vp|H-=LrqLHkg1@4yY-=!`;<0O*=Xl-E+;)8MY^jUXekXRM28R@MZQb(ed`NNlII@%F0(9;j z6K|t#9l{_T+2y#?#^aCsvq!%Zn;zkK>V|umHB((%D3;yp1pS6)XFtt&ee+$FpDUvy zRg+Zq1K!GgI4i) zXN$b#)1d?O^hegqwKN2Yt&wqEzZ#RB9vClP) z&ql6*3rdl&)!FAg-mq1yI0HVi?nb$okNVv&J=eIB;%L*CG_0_3%kTIftJmcG$Q@C0 z+{!&i-@UK4;0T&TFNf*;&*=_-=9Sbgypb4?$;K2+a>kA5-a1$cqPz9uwDQ`V%D_QE zUb@_~zdyFfn-2P2h?Tb6;tO5M%%ArqAt5-4vZAHlqvZPp9El3X}or)7B8KjqfR+Q23h zkM`NB=-&cUQ*FKR!KWs0s|5Ed;3&JbZXR6< z`_mVo^^zvPsKxc-1`((c*pP4I-p{-Yi%PrUU!80j!?VD#-$>6r|1;~eJ!6~Ud*j=q zisGO>xlh1S>Ff%wx-|a!w;0v5QQiwj>Fk>@2^tqSZhtz0?r%(%%|uB{Nkv>!z@op( z*T=@P-e$>rHXp(S-CvB5KK=VemmlAe#17l4Ul)P?0W-Xri8m@XI{-1~7zChecC2d` z=x&PnpB~>rd0~25=@g%X9o-m5f>?S@xAazqu&>#2qBhKJ;WaOKxR2i@d)~%Mu#~C> 
zU3ID)F1PMQ;Py?1G1*_I-veT#A(6f&yZ7fJMlWqT| zDA!QUEb|oG;t7gs*m`caC-+sf~$<~vTxz@=t9FwTU^b-V*&=Aug zLiQRzgepSu#-sH6~x^BQIjDa^YzJ`?{>rXC*TJz&4HwDEc<`LD4_IJ?IbZP zCHZYd^SU5#XxB&|j5#nG#jg%@M`j7NnUp;sJ^NElHE}Tx&Y{*0ihfc;@jJZ8iWBm~B(Ds%Gy8(TqP^m6g36xr+iB zd=3CC@@r4O_el&+ga0?-+z;?WG)68{{5wb!Un^|Cg-;1&WL8DK|54U?%{1&Ju6LRB z$@H(Rm)Ee>kM8XUh5b`V}(i0;;Q1Ax1w>9{1ay% zAD^hDHFCD+rH_A(h~NjLMn*>~ht;m`ZXK=#!XMz`4RW=z#-B$368+&5e)=j zCICN=9ihWNQvebETPrL9wiQNb*oYXbl`N&6l?=EuVy zgb7#Ig5CHib~@0_0uPy?y9Lrxp^v&coA2W&#-bmgmo}C8mtT-ys|CRvD|-NZ1f-aA z!#CG0Jz;@d$L<;v+(`+$z^&-&dVB^n=D~M!PBk;(Nc$A$@@}3)OF!@JRg=wTuw>;? zb8-5#tj9``S=|W2JMTa~1TGs|lpWG^}PD))f%fO9$raBR}f(__Y#GlNOja2v_C>2)CPKQmr; z8=#Y2il+9OQqIz=1%9(Kkk)W^dX(5}&Cv#s@(#hpw{V>zTP;EkoUp~SpLhTxp2iEZ z2cPGBVCHwd9xpFRvXHYr9(uzlXfv3uVAT?A>Rg9E>lesYkNPwpdbPN2B?aup%>QKf z9J5;ZZ0?Smx>(nn_MQiFCX1(5B_H>uKyJ;6^H}No3cRfTQmW%7a(Z8Q-hO-e!6s#i&{(gIx^+V%+?3iyDGwuLE;CN`#6|qlv z<{VBG@Jz*~VhAb7KW3rTU(TV%tu^tDD{k_P@Ux z5~Z+bQia9GP5mr5sCkGHz?51t;F~px9#WEIYY; z6_=}Mc zX0Z_`$AXu{&157_&Bx~m_;vlbd+UIGKbpkrSNIN9 zo9LgW8_#)Z@$Uq=2S8&7pWd`T7-dkNxtscSJJFYI%t-*-GW29P*_bvkf6Mdcpx}|8 z-ke(_+^o1eZ}Zr&a@p3XU}p^ z64DYO?PrdA?4Q2cdr(Ps}eUGX%$v zq0xK(iv3n5w1J0@%9LSJq{_0?>)xhhpKs^OQfJo}7KeqPJP?=Hj)a@QBX~biR|{uT z^lR9xzFPUEs`C6roq@Q|wxs50VnvO>JyzUyL}Xc9Jjk-lV+wFIk^aeEv@K^Bz|)4f zWEl8t4+puXI^fr8q-C1PwBR&KPdwu*^Z1$G*CkG`VT(B99k3rIJ^Y6tf}0s_V8Y>E zcHDzh^-psws*X-x^=gMJGMzU@Xr~ljU>r>9419+mFR45_lb&$*9erF9|2(@*KhmIa z))S6Ob#GQln==W9Yx8p!4V*yG%FV3GQ!c$86RlUs+;r(scuQ($@TiLfY*i}U+S$kb z%FAgxsT)|wn3zMCx8XOq@Vk~I5lF5M^tfY9IFgGy^_ScP5%h*%+x}#oD~^8azGBiJ zhZ!$oqWc5ikO_rs9mBf$@J)MxQ0){2Tg3aS4g}q$PC02T4PBm@`@J(cv_R;+6TCgv zAq7rYtM;FY4%X1$Hh98_nmc`T%v$sGWp-%5H^7n zhL%!UL>h97KAz66pG*T`3iZN@wy-Ii+kdVSf zTbu)Z#*lXl2gCCMc`8recw|#Y&a`p62Chd*Ux!+}85f`ZF@>~xZiFYt?dDB62Imyb zj;0~WB*qiCyx$1g%EVqJTqFVylSeBq(gEqPsuaVCm1QIBPyF5)rL?vEsMjucdsqX1 zkC7`g#T0mjQ>^YXz2=THLKZh&h!Y%3&()HH$*e9tRCcaZqotN;R{c0YL)^}&Y1V#~ znU_|pfPubq^7YJne$NG$dkuKAxZ+Vh?ff9jMFAE04zr$TTI|GnSmQ5}bc`$1y1+x- zOOvk(uiv-<EH9z3Y>}2*;n8%t1cdy0+7q01at6&Q%8Ld}?U3xz& zOq3JeE>h&K6KM)ocpr@#v*aO%0s;)0&15vyu3nMn;~{~X@v)<6!=XH*ukWs)@2%$k z_giCTaZew|5s#-W5JT5hOf6_KB>K7{#$OS0e2=3N`K-#Rz^SEq_Y947z_fyU2o8|Y zNgL(9avSLxLu!aMrnu<2e}@QSY+1ptne{+zak(NN{=|cV`F;6shT6ruSDw3XHd}>;>3%?l;*0czMn(M#;&>>8rPX(thy)5u(5<=8 z7>^+(N=*!XJm=DwqbtIy>$aa>$!XDzTs4hYq$m2T*QoOSX?j)952ZmgjPxTcp;j|@ z^f@GzVz(3D9H`>Nc5dpL)|Dvhd%LhxCHtR-GKIVHvgr%DTuf*^6kU(eTo}&3t@BAl zW_^?axFZVrPA@oZ9d3Lm>rG|KzhG4TWTl8L}rgCM~X@lK4|w`l-h z3*W>TwUnCGbUYupLJ^cHrAcNbNkOcX-Wexwqia(GxAFT=(0wy^{lDv{4ea0JIO#%` zpNVD$DCy06TbL9Zi?Bj6HZb;l(`Hr(VP(-w)si{zFCE#zQ^S&^6cCJr_{ev!*xBS$T50%CoRh14G{3|_r?bh6 za3U7@ADdN^XiDA;Rh`=Vp3*OfAgDFmz~T%8ujoZyqOf z6_=m=iyKn9bp2Hj_CjGp7t8~*&@ASG4{Z<09}^a~d!jmBQ0jr78i{kY*jA+?Djj!;!JOlayPsZjKw17uC$Dsa4oR9*ZMKMuvsV z&IaIo1+M*H^T!1^bhzAz0$}>wZFfh@)a*CxaI!S$and&fU==YkB3`pM^(qYw2{A1Z zt{EZtxJyQ>TB?u|jDkml#u3lbHbFBKN5Api>3T2;lf9JZgP##d3Va#W%-=|5GkdK6 zEeaw#f&sd))h6cI0K0)LRa0~-n;L(JPGf%z6^ zBG;C*M(yBeBRK+Ir`#IG(Pz?+rW&s^%&Ww#y-#l$53gL7sC2&)49@9Ec2QfYwOBkYB2RASJPRwar;IBc$hp^k(z|{X&kP`PEoGEVl@c*OVSdhfYWK zk|dml=_*)93WCWn77KhQBGN7>^b*Xb(aeAM00nh(zfL0+ll3xaq#d7&cwMMSWc=Yx z*(FbN=sv0vCuB~tUy0<$-9@DqB}g1A!)O9avpn3h$Dm9RCn_d{wFYWU^k=mOM_Lu? 
zf<{jPXM^=;JuP$muW+PfR_gD=!kh4bp?bA~LT@&+f@-ddk%2yjue_cVE>qj;-8O1m zRV!2;!sJOws}fa9iV$)~+C#c;EOiiG$I+6ZiBPG(D~0vTXWMiqW@GkL>L%Y21>sW1 zF@(oyjC?ov$0C3AaJ}`^9YjG>N%5NHBed5Ge59=Vdbr&12XPhSlZxCu;l<SALc9 zGW9vZ;RtJljsA%NO3pZ&pHOMIW=X^Gu+obEPs(S(%WHpC;V7&FeQdt000Y(Tw)P!u zdU~Ul2L-db@+b|;*SHNEDJTo1Z8Tg3k4Y0@L3I_6=Xfq1?Ur=pFlb^Z0`r}{|D{%a z{?ix?U-JH{x&ueupar3*$0VDC(j$9Jh8=E}Iof=awjqW74HuV#JC2l;DH=7(;O$9w zn}`I^2l4Ub>AFZIK2p7I?^@$Y)o(0`eo=gk^R8|jQ;N*wa-_Jn1B zQN(M0MzmCjRq@YTT~A@{;ofOp+*E;>Zm+~A2m)ER%Q9OVS&)Hr$jasB)cB{9k%G1d z!X#KbdK|e&EKz2*4N1dW(igP)IOVhx4&dq{Tq)3Js4aL@%nOqZgM$BD;i&8T{ynHX zoM!6jciU6N(e0WH08M_mn*4(xF79HHWf+IDY^(EQTKSCx`FoVylEn|2r%kS8)992| zNK-|NUB+#2MFhj&qYRN=59}Z14;kIO(bNA_$KTb{RHEyAsUh^GClWPq_87Qy;5rll z_uz`7g#wiBs6J4ixeud82vWGP*eMVpGLse%j(FB^@(b+aH)`_*RABssZzH3c>x^qG zxsP7vNcISO>##8>lqkC>0qcx*UC|n4fhtF3+%g?!Ul6Iuv<_6~CAs1YsUWOl3dYvg z1xj^~feV}GCp@NX#jT9IaO^a>y4W@%1|lhgyn}czGb%c0>CX^w|#?x zZOQpJA=jRQ3Yn}iZ}%IWB#=oDpBLw6t2>lmuxngBZ{&K}9-!l?Y^MM*h4gwq@-S={ zhgxVPuWKGIRv{uw18ykLfe20WWVsOVy=CmjGf1q%wGT00Jksz+aA9DsJe~o>L@QLO zgDHg@w-mA05+jH6rcqB)AvP)wX&fqMt04x_q-6C)7+uJ&;^BA0PrQUL1j{oMJwkd# zCRRLRw&QT4X(%@xH{~wioacMUE1&0Vll?eBQAXe6r8BJ=BMv}g4i4_+M&;XDviQ(V zE4xwORTHknaIu#kGq^Zt@2o{2Yt`Z&>*YaEfZod)_qR&rCYn!;Lz{YEc;q_bJ>@%e zY}jt+^^T=*Iol{+AB~n?lcmvduDo(J7|z+3%oMg33_dA%aWpK(Ff@*{7yIw7; z+{>pFJZ9#x;Ws3$28xKwRnJW7BqXr*EI8f=;g`7?ie=ZWLkE(1irqEHAdlIE+&ZzX z)mTi{P$q>i$fjMN5w|h6*@TTi27~q)Eo+i@eqjZ@tPVkrTT{J7|9m%!2Csm$$mMm} zaT6}kmQ=gG6OX=2(HxOI7rXdIjm2bIwOBz0Rf-CXPoQ)k^|gAbtJd|x!KpJ48yg(= zuGgZupAe&;(kAoh4O+Ro*&PN9u0x+&;DDQV9P6n4;6YDE)Y=&1hRIM;5}d=@V+n<% zSF(2+BYoF8`s8**&cWaS5GeZ z%pF;jC1qK6E9P>NlAs09)8XrzCeeZ{xR*8am>;w!FkWDu(O0}KcdOj5)La}L8^!$y zuUbIJ7w-ND-_RtOGk%8;n8)+4NRpiB+TJ=l{sS5a{h)z(PJp^RU%cvn_$A|+qV504 z3{^y^gzZ4%a)K%C%^jsgpH0;*`v^Xnm3CAaL;cRp%vQLZK_IyjEJu$kOW)u}&m1v? zY3)$)`_b`EIRIgWwU;)y|9K#N1^oeir@gyD4d#ZQ`clac3&a$hYF`h|0g(5~fgd;Q zit%?_F8!UxYRP^(J}nc0iv#*KZ?T(*{nPBLI1xbIfi_i5C+iays0i>5>a3olfV7+% z9UVQfKYw1oKkw#J0jUV>fI}081p<;rH(8uJEbTTZF0Q~_1AFr~cpi*ry%2=hKd9&s zN2MKJtQ}bp(t$yeJZj0A{H|j{eaT|?d$g^Mx3Pk0hOwnAa>!yYO3nCarB?}j?u=hK zQzbam;(PrP=F?J7zjL(ZJWhWu4#7U(e)VL4N)^WVrv0v(B48FRy_e$wgFJRCRnF4^ zk2!B}fadeNEnUIOVNOm?duN(4lbp^qC&^!|YaS2iDN_hDP+APX)l_0nQtBFJ5rv>{ zFT=9YCV$g>iJ7wkK1WS!38{dhX7YO`%c$b^@Po=MeyOS2DK3)&DpX2f=F$&wE2@QF zWq1PmfSQm+>iO==53l~M%Y~i?zqw03Zp~Oo{l69OmWQZUb2WSL-jpH{c7wqg(AiSkO zy}}KYW;g3YL-x4oLsPY@sqk*|*%*h= zRF1(*At9U;B%kpc&MD#JXf(I=w#-8~3AmQE9^;Fs1Yy(hk&%%TS?qeE#WQ$C_PHJCl25W%~37;+JXD!37j#Mf0w5i7xVn`V7Mn-y?yph*eZ=jW}Y=~}mH zAvmt_>8}SJNvqy~%=g}DCg1c}kog3NLZA~!XL`@TFdv-jAk{Gtd`4j4ye<+^Kl#W?=ATy3PZ%tldqS zYwd>LgO0P+uZ8w^AYcND-gDZmQ`n^ReoHXLpVh%R(Cj_8XK+%d2UjiNF&g|-k;PI& z_sUkh(*>=lemuSiy|`_fpVNZmuaT)h8qEieYAm^G3ku}Bl0-g~0eIT|H6v}0GQaE4 zW9uwnuo2IjSVD`-Q&eCg_=v2Y*%-cRpVTF9UrlpFjQ5HJ#_} zSs?UqZl#eX2>^`u2?$ot0Ivv|Vtzx)OvfdmWJF5Je{QQZ6&YdZ-!R&N(LV@b+b!`+P((;5|u~0lU5y^5o>%U$0 zX9qipcfpORIJ+ zpUyx1A82XM*{Pz7fqHM==XX1`uyxi>n|V3bFc{lQ2}>tZaZ9IYdEIk}P;O7V#)!f? 
znrXM@it&-t`j-U4xD7`c;*Vnrn-Yo)Aj^%Vs3`ltgAq_BlfmlbRSAkwh!H0 z;h}KD7QnC$EVvX*O%q9hr)f;i*24^Umc^~{P6V*~yJuNfKjer367~g8o3DQ8cuGz& z4K*=G&YybfQi}A+UyY0b)M$zS>g3vd`WM5YR-uMg_V%7?|mZPF9Bp8f+P<^&)PIHNRe@XS@J6z_unW-z$C_=p@X> zynF-fSu7kG+=i}eN)hW}4O=6I0%G29wPlsxW~#mYvgKDODI-@$jBIgXrB~R+l^gU_ zWF+fxN{{Qs;>%eVgX=%VcQaqFM0!6Jv$Y$-UW`F3{`+IQ7(5_x3_4&`@(c-^fjFHq zM~VA1T2hRo)=T{@Vy(;!f)034><~y*XBeoG;hgiAPdvhrJOZ-o-fUP<1-@d(lW-Iq zMX?iyGG}jXCOA=fm?;slUezfqFq-<+1J#D!BTG*u2=pzi1YERPrex(AghR$3XciH; z0?+hi2QxLLnM`hAU6c!{^f3?J*4r=%?c7pf_wEf5a-t17mm6Um3F2nj@zDcejP@wE z@c75cH*dr2@t_uLEQOzU4!(?5b3{dl&8<=d!>{psfTq$|K6`L*S0?OJ#rvr`Pof{u zQ9ajn@~dq4aRd90q&9HZ3)U4l-`(VljI{->XO%FHnSV@4fzq&N^{=M!erQIbev^z{ zD1Pach8^J0udsH!T6BAxrl(c$>JqkLxi*p(f+}xtafH`eFnc}>pQf!X7yR~!O}oI& zyrux3i?j~5uc2rfxUtkWqtTsu%hl6BPvG)VZL89d23n`?zH{qsjRkLuvLA-}OmZqg zaCXJVP2!rbFajS`oQ=sYNaC^Ur7SW}h)vKD<5t}61>eFxBU)jxsD$%kXhxfhm>{K{ z|NZ(0m5}!p{9O442bx6i^cV8!IAsmWW$*e^2lfl0*LgzBW-wD<1cDA{!$Fm%0WEU9 zNx8%QBwG!FKHCN1sL77I%4Ue6$hu)3hc~IFOp(b(azZcL->+hnDL8a$6XfhD%|9$& zfF8e+ILKmCdGYNyc|jnaRZw2C2F8)a;Bn!i{cugkB&&TqGo)nj#xZ&HE0sWu!@{vh^X1Di!8ui>mjgQO|0po&7^g)v`%fx0| zBGdm){^?u0N9a}j!mIj$)pR2sGf-ivv$(YZ-5&m2rD-B`_{qPUJ5fdd@ubzu836t6 zWu%qGjYwv+3a`E`3Qc|V z2&Ier)4^Y1BEi{1OveT^NJ&82H+yR7=tQ+dEAu_z0<8>tcIp>fsHQyZYV7z(DLq*hF0ps( zCLwpc)3f1E>bz3(pBxr_1BE|>c03Hm+fhuE15-DTLXoLyO{z%F*NfhrXV~e;<$g8S z7njnXlW(XpB(f_26j2YumPyM=7aU?SC3SvvC}5B~05zJS^%m z;JJId2hl27G_C>(6Qk-`AP{EqXO&(|U=*L!J!kVewI~A!}Er{$i z-NkDd05w{&800e^PlphK2HTvoA}?M`HdAV#JS#^Et6jx%at*Xq1|Yqzg!SbbOh(3S z{HuserJ7k~!Ueb^12NdT;VVB8*m49)Bb5nn zn?m3V;juFB^wJrt^K-++VwHvHIJojPKHRg;sx-_AwU4>`{uN%RfT!hPk^u^ijuR)~ zzwQn${|&1*&sn(7Jxm?}ICN3*-wP4L!EIl^3OB3fid<}$EUc#^!Tyx&V7AVJ0xpFX zel;UkjYG5hnXUWsG`sSdZg zX)~ev?O9TYpG(``#H}QN7P_wlx2oiqPOQimMD|1nqd8j9OV&g7g|%%cviPAWv&7#Q z0*FAv=(^BL=3QTjJ`yHnhPW1bhd4*mprD&7$imexcGL`17Crdr@@-vK)GKM7dAswH zA-Ju-ANQsmbD2J|!!CRgjmPV`o;}&tQ;Kq{!%wTX`?K;w-48!}DcZOmkckmwF$#Qd z`S^HU9fE}L=&no_2Jy+JO@;(i0^ojUfZM;33S&Jh{&g3n+FwbZqC3km;QIo zCH;D`D#zfP^@q8qs$rM=)8vg|WkBiXzbt5ZZ`or+@*}7i6NK)G+mGY`snQ2bLD6pd zgvPaNENbi|X#ViRj)%)G%)k_KN3U!(j?-Tye6ZSjzTvxiI4)wt)jeG17a`Us5!0Ta zmAlEW{aPW-gM_g*v|{~owAO+^2XY+CIjXVfeD&?2dMpCEkjr+bXixNDAxu=~-3_OY zyW`;BDL|9?0*TzNUnIt1r7u}mVR!UgE&&9vK=Qhke2R6~74(S1oX{O|1Q#hXsA{Jt zYLbmj5yXU!{F1s4r-tILI0d?s#W3gNe1$zH8S14!radc<3CrhqFM(T`i}c0mDa-QN ze5WUZ@}}1)69QvAtS;ih%`M}rhHcvHUPfu;V6gA1PN|sxBdMr>(mN+shd5w&!t(4~ zyaXiQ?^06g6*oJsx&Rr>0w`2TA6oP5)*T*8AWQC8VS- zNjHJv7WbyY+K2M@{?<)3;O$6SaUYG^hM~GHEGdhwHdR&a0xjee(3owu+JDIEeGCdc@*>dVB!n)r27F(ktf&H-5b_3{D?NoXPirJ+3<+w(oQV1XTE0e`0%ZU!c{5mVJhy0jn*X=nQI9UuP1pUfKWyANwm(ql z*T}t-$QVQU(!f9I%KSAxsuBxQ=ijMcT`by5)YPQxBvM^VIZ#n3RW4 zh~Lf8Uw>Cm29-wKG8*EedeWO)OOM7p&@wt;u+}O^L7pb08&>9Wou* zpEs#?WjS{4XcZ}c?za0lKz3-M_hY1efPQjETYFU>dKpf)tXv|?@E3ll>zy*UaT6y2Vr>Nh)*G7W|HvSYgJ zghTYuS8&h%EhxjouSk6$5c+TB#Q7cn>g@0DS1?Kvx9+K5FaEac3N&4Asq@@|(%2vs zg}2ND_PGP_fGo-7o2@zZ!=Y_lg_x&59&wfDbqSI2)V4?o)%NjSp&%oxkcMtqsHrv3hxTUV}4qG^+a_6nkY{<^swY_oU%26(7oG zRXtj&cIv5XaK7SKTM z+n5j2W&>UM2pkC`AhTKG#2Xgw2*hgy@$p&fH@x~EkVczme_`qpc{e}@%lBY~|R_3ziZEz&Cd)#J@G z<*XZa?*>i+MAao9cUq{bYqp9i2C(H1yY2JRynvMR5fEm7{F8d2Q!fQ*_JryuR5t@h zR2&&Ym&37bfB;PHsTBW!;bRo9M>`O{V6P;!vQvt?al!+TLd!ATnK9ZW2x!v2eY1e= zz)#lIllhPuRyiWm%x&?TQ?Wrhrs4)WiKF+h5(N)uIYts#X*Upq?}It0t(otZ#|up7 zx^OUyvI_2AAfCl;4S^FIaF8+LedY1St*EM}XKw8lpJ0VM6bx?#m?G`FA9_Bg90s6fJ346UsQW z%(m?79)fHG$kA=z<2=QapCHwMWWFu?R#CbGPM_G@AA$;%ZpTofM)*EF{YBDrST(sAzzgy)F)Q_+tB|pVzN% zsZxPAgB>UTG*%(}9l|L-UXOS^a#H^m)x}9+AJ^r2{v2Sy56Hp&{tm1Aoc%bOJM&A) 
z9lUi^6nn{D7&+jOk7L4!hc6c8oxP2dG31`WQWexjV?Kzm4cKYZqel8FWizw6L+!Mf7+~w92{`r>q8AStGTe4fOOsD2yGvdyBy*MY2jb zVFvJPhs&9fDMylDF79^Spu3N0W&LQB ztU@J^U3Wjp%X;Y4N(bZUs72a9RFXP-f^>9pxrPF){>Q%xkABvC+7*71hr)(&72J@D z*lT>Cwi^uNz^lYms_J_b#NFL}sh$7o#>E>UR+TrTPU3zG=Q=;{c|9i%kq*8Svtohx z2xr?<^4tD9-IxMc8n?fu4>76B%x*`Y&$^j( zZ=D=p(mg3@nmIy13^%tYv*~6ZF7DP>CQ?HkkM`55<@Q%Q&rX^TN;K9a>sZ^0y8#S2kEoFIVGI~ECPs{D6x-yc_W~_=plV>KT z|2GUENCrw82ED_^7f?I@wYScf0kNtQ5VwHnv@+>-mPrU1nM5#>=z9s9LF~0+9sk~L zlRMICi1W}UDO8xz7FGqj>5Gt8Q~M5ZOJ6IU+ba2Zx>s&3NRSVqQ21?XDgW>D$CBE$ zYW1^&2Y42f3659ev+qg#e#TGCe)@gpthoHIHL=9bd}rf&)94?0?m8}XVoMC$L-9l^ z2zmjSxtuO4e5T>=iVv|HaF&ZbQO9CEi;}Guaawmg>MrM<&mj(&G;sGWCuki-62b8% zj+a!KtFL{_XL|j5xP$KKL#T9z%JTigYExo}qidg`<&$&Wz{3Cb!xC4tEMB`u(#nOQbMF z_Xbn)w4(JktjAKV!gDa<{I?m6h~rdw#UW5MYG3*g+@nV%n@9UzEWcerT`Cgs+Xt@< zMmrgqGqO1UaH&*JT$I0CSFtN&s)2H)*?&tIp3<7KL*ZGbB0SIDu4akt5Ke}$M}9xU z4Vu(p4O<;PEjyNb6dXD6TS-!5O}PoP!OA~BT-_oIFo;sTNqynt2dw-%W;AG_C2KPU zMQ-TEmosg3zJSx=>k)M1IGB{jj@*MM)|22{)W^clpz421UkU@+O{ znHoU~|CN@rT7y3($Gk@>JgM+faP$(WQv(V0H_aUtoTu|M2l?J3GvjXk`ODcd_u#*j?XZ(yELfZCOUx@9UV&bf(f0a1$1;x1^{WP$7BuxWt`Fy8P z{K1@NfHvqsTDj4b99MqyRF%?%S-0G{?q~V? z{p*cv&TrL9+zb=0?xRBQ2;W9 zy{^!!&UWG6V3zber3(#)KT@z_kcvkukmDNKbrficO;g^^r==l7FY@5SQ{JPp*zr@& z-}o(OJ=8hcYTdFoJvzdj4|n}pY!_d&KbU__Dmvi5HufMZlZNGQzvzX}>b(j6zco!6 zd^>~<(7I{j_WR~}#yweaJ?IsNM&hSGUfWvut$wtA`5ZS`hQo&6fMubYO^30*KZb2b zEHF71&8CeH?OF1K`{Tb_&TPh?efnf#aodgD7K3IC{*=+a^LA_dt)|f3VNtT=eyEq( zDExjHXp6G?GbraH>04}>4psGF#(hIKGJH6gi+NDK8qp!fp_%)E`SLemu39MI zK^a0#8A0Y}OQf1*0?W=^v_vr6Q591Idc=x|8)-K$LIB1E%~Bu+Z*PA*`*r*5sW|nC zf{nAH<4UjXVc7p`0mAP|*lvj7rdrRQ{w7E#3KXP2^RhzxYuNaQPxG35_VZ=N&6DHn zDC3Jh$`E$FX7&Jrs#D9mL)ZPSBAVcZa>~d%@^gkpKx+aMaFxRI_Zv=1+jb3O^MWJ4 z@7&+Aw{8dJ<;O=~%ZIbopZtWK&)U??c$u<2V|Fg{?X;R?AX(m6ak&?KzawAlkpM&a ziHd!JCaQ(+`C78^*+E-ac=y+%t;bW{>cmPcd5Wh8+&FdVueOJNWrhNno|+k(H=dwz zz1?~jaWxpRxczDW*Wl?%xuS*pxXnAA%Fa2n-!HzuUrMwn{N(dP`V%$O^sk@sj^Z)b z&Yi+{bQr#;+aH@A9VX_o2Yby}6@J_H7TP)TG{_Hbky=m}fSqV5aJ>5w6HdOIk6`%o z^Ij_p$%Sj$55n!8?Hx*d_7+`vq_Owh0M~f(uTxPch6@&WK9mNm^XX4~;yG~$3K^VO zjnyhExpLg@>l_AqtYG51azfp<2JB+V4^?gb0>h29Fcb`^9pig!pJm7<%J(JNoSn_9 z3sM%oY2gT_B$f1g^vCM*qv0&8ki>Q{AJ5e*e=WayH*9 zA-*A|3WG-v+tEj7!g2S^&3&F+NZAMpP@DJALZ zJxu4ZbyCW#`lUqoBMKKzfmY2XH<5ai$!RqWOArD3Q|3Pbe+S^Og-W#VowUn*OGt(7 z;OV~q+5XA?+}yszW;S+Nhn{d39w))f@lP}wDW|>2&UfT@W0CUAQ(y?acts5#Xz@`a z2APY;i~CI@j&{B*6MQ#%{xo7!XP>Y8Djqj0L#5lsPa4*@yir|j*L-(cRPQ){+{{8Q zs@oHeOo%u%=pSTMbm!14DS5`j1BgPC?_Ylv_@E#_x92h@-8zg+U8&I2v3w4KBkn;@2=xUv4m*31l9@SkLG1?%j6myt%-+g; z|2}6dF=y~Gdguw;lY#p|pY0^{ZUZY!63ZqUf-GLalllxs2D)5I${&C!?S`%T!jJlE z^_Rf}I-s(e=crK{{5l%%NAGL(4`Z#&7E_B43U+@bJ3cBAEkF(#zOHvzkhQ#RoJ^o= zT$ySR%Yt_B1nD{F!^;yhI}>!bOf2{q=KxN3tv1N#qbBTehr3M#$c)@_n7p_xt4~Qh8g9hIk06^!a_DKvLW!M2+xu3uTJe$pd$+LfH{64X(%(3|7 zH9-y~F*ds>dqPdAwdM#>Bf;mg@OET#qvD!a&5Ahv$g||$BrA*Qqhn!9xfY~w|NRc+ z(vI>K!^H~K zz|%i}J^i*Qi~r=Ff4#BrO69mqXk%}*gj~oBxha|#Jsi&%;>vrE_RA37cFdv%i5c|# z9gt6*-CMs^`a;b2fe<1-XFdK*J^z<`p((^LWs?EUiO(&`C1&DsV^Nh_A?>-TG4aCh zr2c5~AVdln<8ozfl27~IR=lUmO<^9+1YBHc((L0q+yUU{%T--fdEEmid z93QzUqp_E?rKx^4iBTjOQmFASr3fIbpL8e*sJS@T zATbk99sAmM^JGwBq=2@B0gZM56K%!%B|wL^MBT@kDPZqi_JT=L(a57tF%neH10fxU#WY<4lcs#<4l{Cc|{(8|PjI8kcv8f;C`)^{*iycnni} zrpIgmuoj0+_+u+1=#=x{!BwKI{E>Z9N?o=@4 zsf#~+W`;g}fn=OEmEzPqKE}uO`n@RT?r>@~d2%N$F%0d%i_4Zmmw0_OEvnlCXy=EoNpn<Tbd(FXCKf{nPcnM?G6EwFf`!UMQdum1%X&8PZi2>)VC?+oz-( zgRxo0qKF1Rt|#fZ`8Y`}3Cu^q#7G+^ra#zkPnEBQ&SvWiAKJ9j2Wu@Z9+)%i^DAXj zri(&W!3;^o<6;w$1nuVQi5D!0(BWakn(Q&&k>%z;&&MY7ROdk~pL0mIp`$eK*_pE~ zvIwK6o?()`Vh-Vt|8=9{BSWA5+fq1k+!SEnR!$Y6{qt0Du14pF*`>W6`h*;An!k$2 
zyFIK{W=^kNJFZQhB(JBxVemlOOZWNT<3`PgEYw-kErIq@@A70E{A*?9xDn)$*9>`a zVv8P|TKhZYK@u$*1`A2ryebFDerBfS&??Vx?^c-D5l3Q3fVUT-w;rOf-|!j+1DW4t zJau0T5|xnmZ?JTy7z)#hG~2%V+AV;*^qV&|wS#c$W7ppE9;Nr|ZM^j1*{+{XS}mxn z`5Sj<^off68asHMpYo zN6M_b7Y)su}6 zqB5b%>3gy$P@n3i+%1c9C8uYd(qS^!*S)GrJVctxs3JoGt#xbu$YqF z%jy-DmRglrtG!n7zjQ!@K8h;$SjqcCua4ahh?KM)&+{!YgnZ3HE-2eg9BsGeJz%$jj6gG=`MufKj! zv~^=|j(PGTYDpn>(eOwRbqC3}n z^op`m&Sfl-L$?jh<+dC+sSJW2~{jUttAe3@~O8hDF9F}il2 z^3ud;pnT#D$D_S}+=T0pmR7ysq@=lUJ!@*eZAZr~7x*cHZc3A*ktpQ-6`<5iBQ$FayxY>w9Hi88msCr9xc zYv%&iyDfH(|2E#)WJF={T?|e1f9mP(5Hv-QZ5yrrw+1SXpB&B|KUv3hX^npCSe6vx zQ#km3Da>G%&%8-OpEf0*%$RL&nxgU31HIs`C%0JsqtLF{)GW39l=1Z6L1biPSIm9W zZ^@E&(;GhXRutGawC999#`C%4OWlhtwt2`oWe$0N&1LyP!ljy7C`D#o3`65@zQ*qa zR;x1mi|qpR1eO+Z=r3=4$+`FSF@K5&8Rhl&A+~MN<7;AOu`8QwVLrfd0~ z$>(J0?wVyIOwH~LKlK>fH}3G?FQ0#Z93RA(dS9EZt!$>!<(KjZJyP{#9{xV@Zqn^A zoY&5Q_b|C(ql})%qZSO_(MeaLWAEql#GA*@9q#66h*+fqI5LEqzFUO{Kl0(huw_Id zUh=nmgimdeL;x0{O1!W2Zy@MSAR?T5UH)FBML(PGMcRpl?E*@yW9p&o@ONpyU9tK< zb1#5l+fE>$5eUCzhP%yTr`T%z(%8tB5Is$|tPai~8Ya1(W@Sl^s@+y<9>hfB(%P`0 zF&DV~KKg`RD?;~weWFOqtU!Lh7k3?(`t9BF2{LVM+nUPZE&nmfXQ;yxk>hImH>_yp z@K|vcF|+tjZzzE!QwK^5>=&Izp52;BuXcIakEO_e!=N_hV_9JRnHzz?md(i6Ezc{( zH%M&Lu~&Zg-&;O&9qr!2He>iM@%M9rYfaK7+TrM8=8+1tEWB^X|9e|~{P$P=CEeMV zNDQw)dc(cfX!RH1uYv@BMU8xL1-!SYgCp`u#fX4ss)O zs?y@++OOMe_P#zXZylOt^?H7G5OTi+{N35p(Xp#P9P(|?t+8)!C44Q{9dw8{b?cP) z?-LDn!~f55OH}Rj=Ih$cvFvJbQNalBZ&|`0*DYQ%M%rGL+diRDhr|fli2h{D53epmOSliRmJbYJ|z^0Z5y@fQqf4pa4dVKuOG;ceXWKy{!~%!InRLelK)y+P zPTuvYrFmEctCaSM+*_An-;XWPRr_nL&YGHZ5vEbUL=h!Lq8A_j=%4GnT(kG%!Tkr` zB^F0JRXoE&TQ5032om^~=av%8qwQ=l&R zIo|s4KM$cuHjIYD3xf0blJrih`+f&0mjt1h>xc$`!-KTY&6_tr)SMq49Hiw`+6Vci zbBbP0E??WU{@&igdV20aGhhftK_;!2kN>#0jQpz_d}+W~$ssaZ@MDhy8|+i78cM^j zHpLJuCX%Qg8{Jym(()yg^+&I8n?4V|L8V2dQpd?YXX2ojR{eN;z*FSkp1a<#lOlDe zny1zh`tS}2<=h|*>%T-C9UrPq<=d^$uc{vGoVMl^KoG$N0ZZ2{fZtpE8ilu=hO zk64nD7~zzgdXJeSl`1Xu%;BE^BF$2dU{_o)k4Np z4*ngjl@=6`5dWJT16goL72T;?g%RVO5BHhzy<1918-VYMiA@yFPDf+-5UNZ?fl$jL z+R!_-6n2uGk#qS956`25&oEw%{jv5aRLtYo={I<(4z!I)SI0xWdQtiLk=A&9k-I5v z_{W$=ehSujX>D|KR|T!zo&qBBEe5|}VDN1u{=3{{CMw;aMX=i?o9|kl;fD_&n!@Pf z=nD%AkM`E;-RHY2vv=THL4R&L_6n!MQqlX}6$=kec&gJR)ha+1DB8EhfNF>{%!}ju zK_!KBBcK@4^YbS1bNYGP^8R4Wy&w;F(b1z3iLMigB77xj5_+8kTS}b|i9fK~Z*9(+ zd>0=16DYl!r{rWkAf?wi=us>YUE|Su#$we|XfaX+8`$6PV`ucJ=Y}&mULwVDr1q!X z1-$cOaVi>xWsK0cb}iybI0~MY=N&0x9VS8h737*EfCw4&IaTCnm3N;nDP}Y9YU!p( z!Ko^FK}00(6Bo=hJN42odiGL3;>wKw=pxM0Im(zDhnkkwvMcZZPPOleWWri68)%~l z3ZS}AM$@9qw}|7jd7xV224 zn$N$RqgROvc5hL5P^g8>;#7Vtf4#FISkmj<`SfR=Mf$EChd z+H8ZTdO=$6!F>{-X>05Cajo~-k0cAPlqYpdM4;(hf-P3J_i1gei8F$+Sn1`fD@q{0 zxG~epI+cyzPk(>n*EBWR2j%hOI^@UHZf?vzvJd~p}>5?qtbAih)v zE6r`DrFmup9u_D`a=vU`T3RA0U6H^Fem^yl?~01L_U;BT7XN`c`%4x7Ls~ACCp*7~ zpgD2?LR&G%4*+PhB;Z_=Icls*HD>by(tZ9=^S!{tK&{uPv(MW*ZiIHebcm9CW`K?T zPuHI15$YaZvhk|wa_SRm0 z;DY4d7@y3SrJW(^%gPR$t0UXLy&5!ZA;hDI?v$ zll^t*RzI8|?qv9_9)RH9>opy9q{2O^OG+#vVzVr_cJrKTa$`OKjX;W9bNK70`$8$? zQ*t*c3}{o{sLSLlrrJFU0wUznC*Wr$5wm z?c(UUm8`ODeG%q4KyI4sosvrvr=-KqHeObWN-7JJc?L&d}MHJ(Hol)Bl zp3x+)^k0p;hPWWfX=1xRjIqb6PbM+@$UF-);{2#qpb+YB-mva5cdm;U)p5KVIJ2Kx z*?P}Y@RJW@^0tn^8B$)BK_TMjY^21IzouS3mh#r#_Zi^e7x9y{+W^K|u_~ zywdyru`-*c>E`FB*l+@|SwHbpC#+a=y{paTG=_Blmz>snV<_m}rn!>K32qmhLTH&z zQC@HStYXqX1gx~%EW$$NH8)?`kz&W7(G3#nWVpyw9XVR(e*{`>mpy+Q4||f5wMO