Consider the following code:
>>> import json
>>> data = {
...     'x': [1, {'$special': 'a'}, 2],
...     'y': {'$special': 'b'},
...     'z': {'p': True, 'q': False}
... }
>>> print(json.dumps(data, indent=2))
{
"y": {
"$special": "b"
},
"z": {
"q": false,
"p": true
},
"x": [
1,
{
"$special": "a"
},
2
]
}
What I want is to format the JSON so that JSON objects that have only a single property '$special'
are rendered on a single line, as follows.
{
"y": {"$special": "b"},
"z": {
"q": false,
"p": true
},
"x": [
1,
{"$special": "a"},
2
]
}
I have played around with implementing a custom JSONEncoder and passing that in to json.dumps as the cls argument, but the two methods on JSONEncoder each have a problem:

The JSONEncoder default method is called for each part of data, but the return value is not a raw JSON string, so there doesn't appear to be any way to adjust its formatting.

The JSONEncoder encode method does return a raw JSON string, but it is only called once for the data as a whole.
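For concreteness, here is a minimal sketch of the kind of encoder I experimented with (illustrative only; the class name is mine, and both overrides just delegate to the base class):

import json

class SpecialEncoder(json.JSONEncoder):
    def default(self, o):
        # Only invoked for objects json cannot already serialize, so
        # plain dicts never reach this hook; a string returned here
        # would also be re-encoded rather than spliced in as raw JSON.
        return super().default(o)

    def encode(self, o):
        # Does return raw JSON, but is called exactly once with the
        # whole of data, so there is no per-object formatting hook.
        return super().encode(o)

print(json.dumps({'y': {'$special': 'b'}}, cls=SpecialEncoder, indent=2))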
Is there any way I can get JSONEncoder to do what I want?
The json module is not really designed to give you that much control over the output; indentation is mostly meant to aid readability while debugging. Instead of making json produce the output, you could transform the output using the standard library tokenize module:
import tokenize
from io import BytesIO


def inline_special(json_data):
    def adjust(t, ld):
        """Adjust token line numbers by the offset ld"""
        (sl, sc), (el, ec) = t.start, t.end
        return t._replace(start=(sl + ld, sc), end=(el + ld, ec))

    def transform():
        with BytesIO(json_data.encode('utf8')) as b:
            held = []       # to defer newline tokens
            lastend = None  # to track the end pos of the prev token
            loffset = 0     # line offset to adjust tokens by
            tokens = tokenize.tokenize(b.readline)
            for tok in tokens:
                if tok.type == tokenize.NL:
                    # hold newlines until we know there's no special key coming
                    held.append(adjust(tok, loffset))
                elif (tok.type == tokenize.STRING and
                        tok.string == '"$special"'):
                    # special string, collate tokens until the next rbrace;
                    # held newlines are discarded, adjust the line offset
                    loffset -= len(held)
                    held = []
                    text = [tok.string]
                    while tok.exact_type != tokenize.RBRACE:
                        tok = next(tokens)
                        if tok.type != tokenize.NL:
                            text.append(tok.string)
                            if tok.string in ':,':
                                text.append(' ')
                        else:
                            loffset -= 1  # following lines all shift
                    line, col = lastend
                    text = ''.join(text)
                    endcol = col + len(text)
                    yield tokenize.TokenInfo(
                        tokenize.STRING, text, (line, col), (line, endcol),
                        '')
                    # adjust any remaining tokens on this line
                    while tok.type != tokenize.NL:
                        tok = next(tokens)
                        yield tok._replace(
                            start=(line, endcol),
                            end=(line, endcol + len(tok.string)))
                        endcol += len(tok.string)
                else:
                    # uninteresting token, yield any held newlines
                    if held:
                        yield from held
                        held = []
                    # adjust and remember last position
                    tok = adjust(tok, loffset)
                    lastend = tok.end
                    yield tok

    return tokenize.untokenize(transform()).decode('utf8')
This reformats your sample successfully:
import json
data = {
    'x': [1, {'$special': 'a'}, 2],
    'y': {'$special': 'b'},
    'z': {'p': True, 'q': False}
}
>>> print(inline_special(json.dumps(data, indent=2)))
{
"x": [
1,
{"$special": "a"},
2
],
"y": {"$special": "b"},
"z": {
"p": true,
"q": false
}
}
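It may not be obvious why tokenize accepts JSON at all: json.dumps output happens to be lexically valid Python (braces, strings, colons and commas are all Python tokens), which is what lets the tokenizer lex it into TokenInfo tuples carrying line and column positions. A quick sketch to inspect the token stream (illustrative only, not part of the solution):

import tokenize
from io import BytesIO

sample = '{\n  "$special": "b"\n}'
for tok in tokenize.tokenize(BytesIO(sample.encode('utf8')).readline):
    # exact_type distinguishes LBRACE, COLON and RBRACE from the generic OP
    print(tokenize.tok_name[tok.exact_type], repr(tok.string))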
I found the following regex-based solution to be simplest, albeit … regex-based.
import json
import re

data = {
    'x': [1, {'$special': 'a'}, 2],
    'y': {'$special': 'b'},
    'z': {'p': True, 'q': False}
}

text = json.dumps(data, indent=2)

pattern = re.compile(r"""
    {
    \s*
    "\$special"
    \s*
    :
    \s*
    "
    ((?:[^"]|\\")*)  # Captures zero or more NotQuote or EscapedQuote
    "
    \s*
    }
    """, re.VERBOSE)

print(pattern.sub(r'{"$special": "\1"}', text))
The output follows.
{
"x": [
1,
{"$special": "a"},
2
],
"y": {"$special": "b"},
"z": {
"q": false,
"p": true
}
}
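As a quick check that the escaped-quote alternative in the capture group is doing its job, a value containing quotes also collapses correctly (an extra test of my own, not part of the original sample):

tricky = json.dumps({'w': {'$special': 'say "hi"'}}, indent=2)
print(pattern.sub(r'{"$special": "\1"}', tricky))

which prints:

{
  "w": {"$special": "say \"hi\""}
}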