diff --git a/json_tricks/decoders.py b/json_tricks/decoders.py
index 63aa090..67336a1 100644
--- a/json_tricks/decoders.py
+++ b/json_tricks/decoders.py
@@ -275,8 +275,16 @@ def json_numpy_obj_hook(dct):
 	"""
 	if not isinstance(dct, dict):
 		return dct
-	if not '__ndarray__' in dct:
+	if '__ndarray__' not in dct:
 		return dct
+	if 'shape' not in dct or (dct['shape'] == [] and not dct.get('0dim', False)):
+		# New style scalar encoding
+		return _decode_numpy_scalar(dct)
+	else:
+		return _decode_ndarray(dct)
+
+
+def _decode_ndarray(dct):
 	try:
 		import numpy
 	except ImportError:
@@ -297,7 +305,32 @@
 		else:
 			return _lists_of_numbers_to_ndarray(data_json, order, shape, nptype)
 	else:
-		return _scalar_to_numpy(data_json, nptype)
+		# This code path is mostly for 0-dimensional arrays;
+		# numpy scalars are decoded separately.
+		return numpy.asarray(
+			data_json,
+			dtype=nptype
+		).reshape(dct['shape'])
+
+
+def _decode_numpy_scalar(dct):
+	try:
+		import numpy
+	except ImportError:
+		raise NoNumpyException('Trying to decode a map which appears to represent a numpy '
+			'scalar, but numpy appears not to be installed.')
+
+	# numpy.asarray will handle dtypes with units well (such as datetime64)
+	arr = numpy.asarray(dct['__ndarray__'], dtype=dct['dtype'])
+
+	# https://numpy.org/doc/stable/reference/arrays.scalars.html#indexing
+	# https://numpy.org/doc/stable/user/basics.indexing.html#detailed-notes
+	# > An empty (tuple) index is a full scalar index into a zero-dimensional
+	# > array. x[()] returns a scalar if x is zero-dimensional and a view
+	# > otherwise. On the other hand, x[...] always returns a view.
+
+	scalar = arr[()]
+	return scalar
 
 
 def _bin_str_to_ndarray(data, order, shape, np_type_name, data_endianness):
@@ -354,15 +387,6 @@ def _lists_of_obj_to_ndarray(data, order, shape, dtype):
 	return arr
 
 
-def _scalar_to_numpy(data, dtype):
-	"""
-	From scalar value to numpy type.
-	"""
-	import numpy as nptypes
-	dtype = getattr(nptypes, dtype)
-	return dtype(data)
-
-
 def json_nonumpy_obj_hook(dct):
 	"""
 	This hook has no effect except to check if you're trying to decode numpy arrays without support, and give you a useful message.
diff --git a/json_tricks/encoders.py b/json_tricks/encoders.py
index c5c3213..eb38317 100644
--- a/json_tricks/encoders.py
+++ b/json_tricks/encoders.py
@@ -5,6 +5,7 @@
 from fractions import Fraction
 from functools import wraps
 from json import JSONEncoder
+from json.encoder import encode_basestring_ascii, encode_basestring, INFINITY
 import sys
 from .utils import hashodict, get_module_name_from_object, NoEnumException, NoPandasException, \
@@ -81,6 +82,54 @@ def default(self, obj, *args, **kwargs):
 			type(obj), self.__class__.__name__, ', '.join(str(encoder) for encoder in self.obj_encoders)))
 		return obj
 
+	def iterencode(self, o, _one_shot=False):
+		"""Encode the given object and yield each string
+		representation as available.
+
+		For example::
+
+			for chunk in JSONEncoder().iterencode(bigobject):
+				mysocket.write(chunk)
+
+		"""
+		if self.check_circular:
+			markers = {}
+		else:
+			markers = None
+		if self.ensure_ascii:
+			_encoder = encode_basestring_ascii
+		else:
+			_encoder = encode_basestring
+
+		def floatstr(o, allow_nan=self.allow_nan,
+				_repr=float.__repr__, _inf=INFINITY, _neginf=-INFINITY):
+			# Check for specials. Note that this type of test is processor
+			# and/or platform-specific, so do tests which don't depend on the
+			# internals.
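+			# (o != o) is only true for NaN, and comparing against the
+			# precomputed INFINITY constants catches the two infinities.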
+
+			if o != o:
+				text = 'NaN'
+			elif o == _inf:
+				text = 'Infinity'
+			elif o == _neginf:
+				text = '-Infinity'
+			else:
+				return _repr(o)
+
+			if not allow_nan:
+				raise ValueError(
+					"Out of range float values are not JSON compliant: " +
+					repr(o))
+
+			return text
+
+
+		_iterencode = _make_iterencode(
+			markers, self.default, _encoder, self.indent, floatstr,
+			self.key_separator, self.item_separator, self.sort_keys,
+			self.skipkeys, _one_shot)
+		return _iterencode(o, 0)
+
 
 def json_date_time_encode(obj, primitives=False):
 	"""
@@ -375,7 +424,9 @@ def numpy_encode(obj, primitives=False, properties=None):
 	"""
 	:param primitives: If True, arrays are serialized as (nested) lists without meta info.
 	"""
-	from numpy import ndarray, generic
+	from numpy import ndarray, generic, datetime64
+
+	scalar_types = (generic, datetime64)
 
 	if isinstance(obj, ndarray):
 		if primitives:
@@ -407,17 +458,19 @@
 			('__ndarray__', data_json),
 			('dtype', str(obj.dtype)),
 			('shape', obj.shape),
+			('0dim', obj.ndim == 0),
 		))
 		if len(obj.shape) > 1:
 			dct['Corder'] = obj.flags['C_CONTIGUOUS']
 		if use_compact and store_endianness != 'suppress':
 			dct['endian'] = store_endianness or sys.byteorder
 		return dct
-	elif isinstance(obj, generic):
-		if NumpyEncoder.SHOW_SCALAR_WARNING:
-			NumpyEncoder.SHOW_SCALAR_WARNING = False
-			warnings.warn('json-tricks: numpy scalar serialization is experimental and may work differently in future versions')
-		return obj.item()
+	elif isinstance(obj, scalar_types):
+		return hashodict((
+			('__ndarray__', obj.item()),
+			('dtype', str(obj.dtype)),
+			('0dim', False),
+		))
 	return obj
 
 
@@ -476,3 +529,196 @@ def default(self, obj, *args, **kwargs):
 		warnings.warn('`NoNumpyEncoder` is deprecated, use `nonumpy_encode`', JsonTricksDeprecation)
 		obj = nonumpy_encode(obj)
 		return super(NoNumpyEncoder, self).default(obj, *args, **kwargs)
+
+def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
+		_key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
+		## HACK: hand-optimized bytecode; turn globals into locals
+		ValueError=ValueError,
+		dict=dict,
+		float=float,
+		id=id,
+		int=int,
+		isinstance=isinstance,
+		list=list,
+		str=str,
+		tuple=tuple,
+		_intstr=int.__repr__,
+	):
+
+	try:
+		import numpy
+		def isfloatinstance(obj):
+			return isinstance(obj, float) and not isinstance(obj, numpy.number)
+	except ImportError:
+		def isfloatinstance(obj):
+			return isinstance(obj, float)
+
+	if _indent is not None and not isinstance(_indent, str):
+		_indent = ' ' * _indent
+
+	def _iterencode_list(lst, _current_indent_level):
+		if not lst:
+			yield '[]'
+			return
+		if markers is not None:
+			markerid = id(lst)
+			if markerid in markers:
+				raise ValueError("Circular reference detected")
+			markers[markerid] = lst
+		buf = '['
+		if _indent is not None:
+			_current_indent_level += 1
+			newline_indent = '\n' + _indent * _current_indent_level
+			separator = _item_separator + newline_indent
+			buf += newline_indent
+		else:
+			newline_indent = None
+			separator = _item_separator
+		first = True
+		for value in lst:
+			if first:
+				first = False
+			else:
+				buf = separator
+			if isinstance(value, str):
+				yield buf + _encoder(value)
+			elif value is None:
+				yield buf + 'null'
+			elif value is True:
+				yield buf + 'true'
+			elif value is False:
+				yield buf + 'false'
+			elif isinstance(value, int):
+				# Subclasses of int/float may override __repr__, but we still
+				# want to encode them as integers/floats in JSON. One example
+				# within the standard library is IntEnum.
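+				# _intstr is the unbound int.__repr__, so any __repr__
+				# override on the subclass is deliberately bypassed.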
+				yield buf + _intstr(value)
+			elif isfloatinstance(value):
+				# see comment above for int
+				yield buf + _floatstr(value)
+			else:
+				yield buf
+				if isinstance(value, (list, tuple)):
+					chunks = _iterencode_list(value, _current_indent_level)
+				elif isinstance(value, dict):
+					chunks = _iterencode_dict(value, _current_indent_level)
+				else:
+					chunks = _iterencode(value, _current_indent_level)
+				yield from chunks
+		if newline_indent is not None:
+			_current_indent_level -= 1
+			yield '\n' + _indent * _current_indent_level
+		yield ']'
+		if markers is not None:
+			del markers[markerid]
+
+	def _iterencode_dict(dct, _current_indent_level):
+		if not dct:
+			yield '{}'
+			return
+		if markers is not None:
+			markerid = id(dct)
+			if markerid in markers:
+				raise ValueError("Circular reference detected")
+			markers[markerid] = dct
+		yield '{'
+		if _indent is not None:
+			_current_indent_level += 1
+			newline_indent = '\n' + _indent * _current_indent_level
+			item_separator = _item_separator + newline_indent
+			yield newline_indent
+		else:
+			newline_indent = None
+			item_separator = _item_separator
+		first = True
+		if _sort_keys:
+			items = sorted(dct.items())
+		else:
+			items = dct.items()
+		for key, value in items:
+			if isinstance(key, str):
+				pass
+			# JavaScript is weakly typed for these, so it makes sense to
+			# also allow them. Many encoders seem to do something like this.
+			elif isinstance(key, float):
+				# see comment for int/float in _make_iterencode
+				key = _floatstr(key)
+			elif key is True:
+				key = 'true'
+			elif key is False:
+				key = 'false'
+			elif key is None:
+				key = 'null'
+			elif isinstance(key, int):
+				# see comment for int/float in _make_iterencode
+				key = _intstr(key)
+			elif _skipkeys:
+				continue
+			else:
+				raise TypeError(f'keys must be str, int, float, bool or None, '
+					f'not {key.__class__.__name__}')
+			if first:
+				first = False
+			else:
+				yield item_separator
+			yield _encoder(key)
+			yield _key_separator
+			if isinstance(value, str):
+				yield _encoder(value)
+			elif value is None:
+				yield 'null'
+			elif value is True:
+				yield 'true'
+			elif value is False:
+				yield 'false'
+			elif isinstance(value, int):
+				# see comment for int/float in _make_iterencode
+				yield _intstr(value)
+			elif isfloatinstance(value):
+				# see comment for int/float in _make_iterencode
+				yield _floatstr(value)
+			else:
+				if isinstance(value, (list, tuple)):
+					chunks = _iterencode_list(value, _current_indent_level)
+				elif isinstance(value, dict):
+					chunks = _iterencode_dict(value, _current_indent_level)
+				else:
+					chunks = _iterencode(value, _current_indent_level)
+				yield from chunks
+		if newline_indent is not None:
+			_current_indent_level -= 1
+			yield '\n' + _indent * _current_indent_level
+		yield '}'
+		if markers is not None:
+			del markers[markerid]
+
+	def _iterencode(o, _current_indent_level):
+		if isinstance(o, str):
+			yield _encoder(o)
+		elif o is None:
+			yield 'null'
+		elif o is True:
+			yield 'true'
+		elif o is False:
+			yield 'false'
+		elif isinstance(o, int):
+			# see comment for int/float in _make_iterencode
+			yield _intstr(o)
+		elif isfloatinstance(o):
+			# see comment for int/float in _make_iterencode
+			yield _floatstr(o)
+		elif isinstance(o, (list, tuple)):
+			yield from _iterencode_list(o, _current_indent_level)
+		elif isinstance(o, dict):
+			yield from _iterencode_dict(o, _current_indent_level)
+		else:
+			if markers is not None:
+				markerid = id(o)
+				if markerid in markers:
+					raise ValueError("Circular reference detected")
+				markers[markerid] = o
+			o = _default(o)
+			yield from _iterencode(o, _current_indent_level)
+			if markers is not None:
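+				# drop the marker again: the same object may appear more
+				# than once in the output, it just must not contain itself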
+				del markers[markerid]
+	return _iterencode
diff --git a/json_tricks/utils.py b/json_tricks/utils.py
index bf8a9dc..b6e08b8 100644
--- a/json_tricks/utils.py
+++ b/json_tricks/utils.py
@@ -120,6 +120,7 @@ def get_scalar_repr(npscalar):
 		('__ndarray__', npscalar.item()),
 		('dtype', str(npscalar.dtype)),
 		('shape', ()),
+		('0dim', False),
 	))
 
 
diff --git a/tests/test_np.py b/tests/test_np.py
index 4e28393..675ee0e 100644
--- a/tests/test_np.py
+++ b/tests/test_np.py
@@ -7,10 +7,13 @@
 import sys
 from warnings import catch_warnings, simplefilter
 
-from pytest import warns
+from _pytest.recwarn import warns
+from datetime import datetime, timezone
+
 from numpy import arange, ones, array, array_equal, finfo, iinfo, pi
 from numpy import int8, int16, int32, int64, uint8, uint16, uint32, uint64, \
-	float16, float32, float64, complex64, complex128, zeros, ndindex
+	float16, float32, float64, complex64, complex128, zeros, ndindex, \
+	datetime64
 from numpy.core.umath import exp
 from numpy.testing import assert_equal
@@ -119,20 +122,6 @@ def test_memory_order():
 		arrF.flags['F_CONTIGUOUS'] == arr.flags['F_CONTIGUOUS']
 
 
-def test_scalars_types():
-	# from: https://docs.scipy.org/doc/numpy/user/basics.types.html
-	encme = []
-	for dtype in DTYPES:
-		for val in (dtype(0),) + get_lims(dtype):
-			assert isinstance(val, dtype)
-			encme.append(val)
-	json = dumps(encme, indent=2)
-	rec = loads(json)
-	assert encme == rec
-	for nr in rec:
-		assert nr.__class__ in (int, float, complex), 'primitive python type expected, see issue #18'
-
-
 def test_array_types():
 	# from: https://docs.scipy.org/doc/numpy/user/basics.types.html
 	# see also `test_scalars_types`
@@ -181,6 +170,23 @@ def test_dump_np_scalars():
 	assert data[2][3] == rec[2][3]
 	assert data[2] == tuple(rec[2])
 
+	json_tricks_3_17_3_encoded = '[' \
+		'{"__ndarray__": -27, "dtype": "int8", "shape": []}, ' \
+		'{"__ndarray__": {"__complex__": [2.7182817459106445, 37.0]}, "dtype": "complex64", "shape": []}, ' \
+		'[{"alpha": {"__ndarray__": -22026.465794806718, "dtype": "float64", "shape": []}, ' \
+		'"str-only": {"__ndarray__": {"__complex__": [-1.0, -1.0]}, "dtype": "complex64", "shape": []}}, ' \
+		'{"__ndarray__": 123456789, "dtype": "uint32", "shape": []}, ' \
+		'{"__ndarray__": 0.367919921875, "dtype": "float16", "shape": []}, ' \
+		'{"__set__": [{"__ndarray__": 37, "dtype": "int64", "shape": []}, ' \
+		'{"__ndarray__": 0, "dtype": "uint64", "shape": []}]}]]'
+	rec = loads(json_tricks_3_17_3_encoded)
+	assert data[0] == rec[0]
+	assert data[1] == rec[1]
+	assert data[2][0] == rec[2][0]
+	assert data[2][1] == rec[2][1]
+	assert data[2][2] == rec[2][2]
+	assert data[2][3] == rec[2][3]
+	assert data[2] == tuple(rec[2])
 
 def test_ndarray_object_nesting():
 	# Based on issue 53
@@ -223,8 +229,8 @@ def test_compact_mode_unspecified():
 	gz_json_2 = dumps(data, compression=True)
 	assert gz_json_1 == gz_json_2
 	json = gzip_decompress(gz_json_1).decode('ascii')
-	assert json == '[{"__ndarray__": [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]], "dtype": "float64", "shape": [2, 4], "Corder": true}, ' \
-		'{"__ndarray__": [3.141592653589793, 2.718281828459045], "dtype": "float64", "shape": [2]}]'
+	assert json == '[{"__ndarray__": [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]], "dtype": "float64", "shape": [2, 4], "0dim": false, "Corder": true}, ' \
+		'{"__ndarray__": [3.141592653589793, 2.718281828459045], "dtype": "float64", "shape": [2], "0dim": false}]'
 
 
 def test_compact():
@@ -238,8 +244,8 @@ def test_encode_disable_compact():
 	data = [array([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]), array([pi, exp(1)])]
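+	# ndarray_compact=False keeps the nested-list representation even though the stream itself is gzipped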
 	gz_json = dumps(data, compression=True, properties=dict(ndarray_compact=False))
 	json = gzip_decompress(gz_json).decode('ascii')
-	assert json == '[{"__ndarray__": [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]], "dtype": "float64", "shape": [2, 4], "Corder": true}, ' \
-		'{"__ndarray__": [3.141592653589793, 2.718281828459045], "dtype": "float64", "shape": [2]}]'
+	assert json == '[{"__ndarray__": [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]], "dtype": "float64", "shape": [2, 4], "0dim": false, "Corder": true}, ' \
+		'{"__ndarray__": [3.141592653589793, 2.718281828459045], "dtype": "float64", "shape": [2], "0dim": false}]'
 
 
 def test_encode_enable_compact_little_endian():
@@ -247,9 +253,9 @@
 	data = arange(1, 9, dtype=float64).reshape((2, 4))
 	gz_json = dumps(data, compression=True, properties=dict(ndarray_compact=True, ndarray_store_byteorder='little'))
 	json = gzip_decompress(gz_json).decode('ascii')
 	assert json == '[{"__ndarray__": "b64:AAAAAAAA8D8AAAAAAAAAQAAAAAAAAAhAAAAAAAAAEEAAAAAAAAA' \
-		'UQAAAAAAAABhAAAAAAAAAHEAAAAAAAAAgQA==", "dtype": "float64", "shape": [2, 4], "Corder": ' \
+		'UQAAAAAAAABhAAAAAAAAAHEAAAAAAAAAgQA==", "dtype": "float64", "shape": [2, 4], "0dim": false, "Corder": ' \
 		'true, "endian": "little"}, {"__ndarray__": "b64:GC1EVPshCUBpVxSLCr8FQA==", "dtype": "float64", ' \
-		'"shape": [2], "endian": "little"}]'
+		'"shape": [2], "0dim": false, "endian": "little"}]'
 
 
 def test_encode_enable_compact_big_endian():
@@ -257,8 +263,8 @@
 	data = arange(1, 9, dtype=float64).reshape((2, 4))
 	gz_json = dumps(data, compression=True, properties=dict(ndarray_compact=True, ndarray_store_byteorder='big'))
 	json = gzip_decompress(gz_json).decode('ascii')
 	assert json == '{"__ndarray__": "b64:P/AAAAAAAABAAAAAAAAAAEAIAAAAAAAAQBAAAAAAAABAFAAAAAAAAEAYAA' \
-		'AAAAAAQBwAAAAAAABAIAAAAAAAAA==", "dtype": "float64", "shape": [2, 4], "Corder": ' \
-		'true, "endian": "big"}'
+		'AAAAAAQBwAAAAAAABAIAAAAAAAAA==", "dtype": "float64", "shape": [2, 4], "0dim": false, ' \
+		'"Corder": true, "endian": "big"}'
 
 
 def test_encode_enable_compact_native_endian():
@@ -267,11 +273,11 @@
 	json = gzip_decompress(gz_json).decode('ascii')
 	if sys.byteorder == 'little':
 		assert json == '{"__ndarray__": "b64:AAAAAAAA8D8AAAAAAAAAQAAAAAAAAAhAAAAAAAAAEEAAAAAAAAA' \
-			'UQAAAAAAAABhAAAAAAAAAHEAAAAAAAAAgQA==", "dtype": "float64", "shape": [2, 4], "Corder": ' \
+			'UQAAAAAAAABhAAAAAAAAAHEAAAAAAAAAgQA==", "dtype": "float64", "shape": [2, 4], "0dim": false, "Corder": ' \
 			'true, "endian": "little"}'
 	elif sys.byteorder == 'big':
 		assert json == '{"__ndarray__": "b64:P/AAAAAAAABAAAAAAAAAAEAIAAAAAAAAQBAAAAAAAABAFAAAAAAAAEAYAA' \
-			'AAAAAAQBwAAAAAAABAIAAAAAAAAA==", "dtype": "float64", "shape": [2, 4], "Corder": ' \
+			'AAAAAAQBwAAAAAAABAIAAAAAAAAA==", "dtype": "float64", "shape": [2, 4], "0dim": false, "Corder": ' \
 			'true, "endian": "big"}'
 	else:
 		raise Exception("unknown system endianness '{}'".format(sys.byteorder))
@@ -289,9 +295,9 @@ def test_encode_compact_cutoff():
 	gz_json = dumps(data, compression=True, properties=dict(ndarray_compact=5, ndarray_store_byteorder='little'))
 	json = gzip_decompress(gz_json).decode('ascii')
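+	# the 2x4 array (8 values) reaches the cutoff of 5 and is base64-encoded; the 2-element array stays a plain list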
 	assert json == '[{"__ndarray__": "b64:AAAAAAAA8D8AAAAAAAAAQAAAAAAAAAhAAAAAAAAAEEAAAAAAAAA' \
-		'UQAAAAAAAABhAAAAAAAAAHEAAAAAAAAAgQA==", "dtype": "float64", "shape": [2, 4], "Corder": ' \
+		'UQAAAAAAAABhAAAAAAAAAHEAAAAAAAAAgQA==", "dtype": "float64", "shape": [2, 4], "0dim": false, "Corder": ' \
 		'true, "endian": "little"}, {"__ndarray__": [3.141592653589793, 2.718281828459045], "dtype": "float64", ' \
-		'"shape": [2]}]'
+		'"shape": [2], "0dim": false}]'
 
 
 def test_encode_compact_inline_compression():
@@ -299,7 +305,7 @@
 	json = dumps(data, compression=False, properties=dict(ndarray_compact=True, ndarray_store_byteorder='little'))
 	assert 'b64.gz:' in json, 'If the overall file is not compressed and there are significant savings, then do inline gzip compression.'
 	assert json == '[{"__ndarray__": "b64.gz:H4sIAAAAAAAC/2NgAIEP9gwQ4AChOKC0AJQWgdISUFoGSitAaSUorQKl1aC0BpTWgtI6UFoPShs4AABmfqWAgAAAAA==", ' \
-		'"dtype": "float64", "shape": [4, 4], "Corder": true, "endian": "little"}]'
+		'"dtype": "float64", "shape": [4, 4], "0dim": false, "Corder": true, "endian": "little"}]'
 
 
 def test_encode_compact_no_inline_compression():
@@ -307,7 +313,7 @@
 	json = dumps(data, compression=False, properties=dict(ndarray_compact=True, ndarray_store_byteorder='little'))
 	assert 'b64.gz:' not in json, 'If the overall file is not compressed, but there are no significant savings, then do not do inline compression.'
 	assert json == '[{"__ndarray__": "b64:AAAAAAAA8D8AAAAAAAAAQAAAAAAAAAhAAAAAAAAAEEA=", ' \
-		'"dtype": "float64", "shape": [2, 2], "Corder": true, "endian": "little"}]'
+		'"dtype": "float64", "shape": [2, 2], "0dim": false, "Corder": true, "endian": "little"}]'
 
 
 def test_decode_compact_mixed_compactness():
@@ -369,17 +375,109 @@ def test_empty():
 	assert_equal(loads(json), data, 'shape = {} ; json = {}'.format(data.shape, json))
 
 
 def test_decode_writeable():
-    # issue https://github.com/mverleg/pyjson_tricks/issues/90
-    data = zeros((2, 2))
+	# issue https://github.com/mverleg/pyjson_tricks/issues/90
+	data = zeros((2, 2))
+
+	data_uncompressed = dumps(data)
+	data_compressed = dumps(data, properties={'ndarray_compact': True})
+
+	reloaded_uncompressed = loads(data_uncompressed)
+	reloaded_compressed = loads(data_compressed)
+
+	assert array_equal(data, reloaded_uncompressed)
+	assert array_equal(data, reloaded_compressed)
+
+	assert reloaded_uncompressed.flags.writeable
+	assert reloaded_compressed.flags.writeable
+
+
+def test_0_dimensional_array_roundtrip():
+	to_dump = zeros((), dtype='uint32')
+	to_dump[...] = 123
+
+	the_dumps = dumps(to_dump)
+	loaded = loads(the_dumps)
+	assert loaded == to_dump
+
+	the_double_dumps = dumps(loaded)
+	assert the_dumps == the_double_dumps
+
+
+def test_0_dimensional_array_roundtrip_object():
+	the_set = set([1, 2, 3])
+
+	# We are putting an object into a numpy array; this should serialize correctly.
+	to_dump = zeros((), dtype=object)
+	to_dump[...] = the_set
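+	# the 0-dimensional object array holds a reference to the set itself, not a converted copy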
+
+	the_dumps = dumps(to_dump)
+	the_load = loads(the_dumps)
+	the_double_dumps = dumps(the_load)
+
+	assert the_dumps == the_double_dumps
+
+	assert isinstance(the_load[()], set)
+	assert the_set == the_load[()]
+
+
+def test_scalar_roundtrip():
+	to_dump = [
+		uint8(1),
+		uint16(2),
+		uint32(3),
+		uint64(4),
+		int8(1),
+		int16(2),
+		int32(3),
+		int64(4),
+		float32(1),
+		float64(2),
+	]
+
+	the_dumps = dumps(to_dump)
+	the_load = loads(the_dumps)
+
+	for original, read in zip(to_dump, the_load):
+		assert original == read
+		assert original.__class__ == read.__class__
+
+	the_double_dumps = dumps(loads(dumps(to_dump)))
+
+	assert the_dumps == the_double_dumps
+
+
+def test_round_trip_datetime64_scalars():
+	now_utc = datetime.now(timezone.utc).replace(tzinfo=None)
+	now_M = datetime64(now_utc, 'M')
+	now_D = datetime64(now_utc, 'D')
+	now_h = datetime64(now_utc, 'h')
+	now_m = datetime64(now_utc, 'm')
+	now_s = datetime64(now_utc, 's')
+	now_ms = datetime64(now_utc, 'ms')
+	now_us = datetime64(now_utc, 'us')
+	now_ns = datetime64(now_utc, 'ns')
+
+	to_dump = [
+		now_M,
+		now_D,
+		now_h,
+		now_m,
+		now_s,
+		now_ms,
+		now_us,
+		now_ns,
+		now_us,
+		now_ns,
+	]
 
-    data_uncompressed = dumps(data)
-    data_compressed = dumps(data, properties={'ndarray_compact': True})
+	the_dumps = dumps(to_dump)
+	the_load = loads(the_dumps)
 
-    reloaded_uncompressed = loads(data_uncompressed)
-    reloaded_compressed = loads(data_compressed)
+	for original, read in zip(to_dump, the_load):
+		assert original == read
+		assert original.__class__ == read.__class__
+		assert original.dtype == read.dtype
 
-    assert array_equal(data, reloaded_uncompressed)
-    assert array_equal(data, reloaded_compressed)
+	the_double_dumps = dumps(loads(dumps(to_dump)))
 
-    assert reloaded_uncompressed.flags.writeable
-    assert reloaded_compressed.flags.writeable
+	assert the_dumps == the_double_dumps
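+	# a byte-identical double dump shows that decode followed by encode is stable for datetime64 scalars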