Optionally allow shorthand indexes in variable paths #165

Merged
3 commits merged on Feb 8, 2025
liquid/builtin/tags/case_tag.py (5 additions, 2 deletions)
@@ -1,4 +1,5 @@
"""Tag and node definition for the built-in "case" tag."""

from __future__ import annotations

import sys
@@ -193,14 +194,16 @@ def parse(self, stream: TokenStream) -> ast.Node:

    def _parse_case_expression(self, expr: str, linenum: int) -> Expression:
        stream = ExpressionTokenStream(
-            tokenize_common_expression(expr, linenum=linenum)
+            tokenize_common_expression(expr, linenum=linenum),
+            shorthand_indexes=self.env.shorthand_indexes,
        )
        return parse_common_expression(stream)

    def _parse_when_expression(self, expr: str, linenum: int) -> List[Expression]:
        expressions = []
        stream = ExpressionTokenStream(
-            tokenize_common_expression(expr, linenum=linenum)
+            tokenize_common_expression(expr, linenum=linenum),
+            shorthand_indexes=self.env.shorthand_indexes,
        )

        while True:
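The tag reads the new flag from its environment (`self.env.shorthand_indexes`). For orientation, here is a minimal, hypothetical usage sketch of the option this PR adds. The diff does not show how the flag is exposed publicly, so enabling it by subclassing `Environment` (as with other class-level options in this codebase) is an assumption, and `ShorthandEnvironment` is a made-up name.

# Hypothetical example: enable shorthand indexes and use one in a `case` tag.
# `ShorthandEnvironment` is illustrative; the real switch may instead be a
# constructor argument.
from liquid import Environment


class ShorthandEnvironment(Environment):
    shorthand_indexes = True  # the new class attribute added in this PR


env = ShorthandEnvironment()
template = env.from_string(
    "{% case sizes.1 %}"  # shorthand for sizes[1]
    "{% when 'm' %}medium"
    "{% else %}other"
    "{% endcase %}"
)
print(template.render(sizes=["s", "m", "l"]))  # -> "medium"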
liquid/environment.py (72 additions, 18 deletions)
@@ -1,8 +1,10 @@
"""Shared configuration from which templates can be loaded and parsed."""

from __future__ import annotations

import warnings
from functools import lru_cache
+from functools import partial
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
@@ -154,6 +156,10 @@ class Environment:
    # Whether to output blocks that only contain only whitespace when rendered.
    render_whitespace_only_blocks: bool = False

+    # When `True`, accept indexes without enclosing square brackets in paths to
+    # variables. Defaults to `False`.
+    shorthand_indexes: bool = False
+
    def __init__(
        self,
        tag_start_string: str = r"{%",
@@ -578,29 +584,77 @@ def set_expression_cache_size(self, maxsize: int = 0) -> None:
    def _get_expression_parsers(
        self, cache_size: int = 0
    ) -> Tuple[
-        Callable[[str, int], "BooleanExpression"],
-        Callable[[str, int], "BooleanExpression"],
-        Callable[[str, int], "FilteredExpression"],
-        Callable[[str, int], "FilteredExpression"],
-        Callable[[str, int], "FilteredExpression"],
-        Callable[[str, int], "LoopExpression"],
+        Callable[[str, int], BooleanExpression],
+        Callable[[str, int], BooleanExpression],
+        Callable[[str, int], FilteredExpression],
+        Callable[[str, int], FilteredExpression],
+        Callable[[str, int], FilteredExpression],
+        Callable[[str, int], LoopExpression],
    ]:
        if cache_size >= 1:
            return (
-                lru_cache(maxsize=cache_size)(parse_boolean_expression),
-                lru_cache(maxsize=cache_size)(parse_boolean_expression_with_parens),
-                lru_cache(maxsize=cache_size)(parse_conditional_expression),
-                lru_cache(maxsize=cache_size)(parse_conditional_expression_with_parens),
-                lru_cache(maxsize=cache_size)(parse_filtered_expression),
-                lru_cache(maxsize=cache_size)(parse_loop_expression),
+                lru_cache(maxsize=cache_size)(
+                    partial(
+                        parse_boolean_expression,
+                        shorthand_indexes=self.shorthand_indexes,
+                    )
+                ),
+                lru_cache(maxsize=cache_size)(
+                    partial(
+                        parse_boolean_expression_with_parens,
+                        shorthand_indexes=self.shorthand_indexes,
+                    )
+                ),
+                lru_cache(maxsize=cache_size)(
+                    partial(
+                        parse_conditional_expression,
+                        shorthand_indexes=self.shorthand_indexes,
+                    )
+                ),
+                lru_cache(maxsize=cache_size)(
+                    partial(
+                        parse_conditional_expression_with_parens,
+                        shorthand_indexes=self.shorthand_indexes,
+                    )
+                ),
+                lru_cache(maxsize=cache_size)(
+                    partial(
+                        parse_filtered_expression,
+                        shorthand_indexes=self.shorthand_indexes,
+                    )
+                ),
+                lru_cache(maxsize=cache_size)(
+                    partial(
+                        parse_loop_expression,
+                        shorthand_indexes=self.shorthand_indexes,
+                    )
+                ),
            )
        return (
-            parse_boolean_expression,
-            parse_boolean_expression_with_parens,
-            parse_conditional_expression,
-            parse_conditional_expression_with_parens,
-            parse_filtered_expression,
-            parse_loop_expression,
+            partial(
+                parse_boolean_expression,
+                shorthand_indexes=self.shorthand_indexes,
+            ),
+            partial(
+                parse_boolean_expression_with_parens,
+                shorthand_indexes=self.shorthand_indexes,
+            ),
+            partial(
+                parse_conditional_expression,
+                shorthand_indexes=self.shorthand_indexes,
+            ),
+            partial(
+                parse_conditional_expression_with_parens,
+                shorthand_indexes=self.shorthand_indexes,
+            ),
+            partial(
+                parse_filtered_expression,
+                shorthand_indexes=self.shorthand_indexes,
+            ),
+            partial(
+                parse_loop_expression,
+                shorthand_indexes=self.shorthand_indexes,
+            ),
        )


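A note on the `_get_expression_parsers` change: each parse function is wrapped in `functools.partial` to bake in the environment's `shorthand_indexes` flag before `lru_cache` is applied, so the cache key stays just `(expr, linenum)`. A generic, self-contained sketch of that pattern (the names here are placeholders, not the library's API):

from functools import lru_cache, partial


def parse_expression(
    expr: str, linenum: int = 1, *, shorthand_indexes: bool = False
) -> str:
    """Stand-in for a real expression parser."""
    return f"parsed {expr!r} at line {linenum} (shorthand={shorthand_indexes})"


# Bind the keyword first, then memoise the bound callable: the flag is fixed
# for the lifetime of the environment, so it never needs to be part of the key.
cached_parse = lru_cache(maxsize=128)(
    partial(parse_expression, shorthand_indexes=True)
)

print(cached_parse("product.tags.0"))  # computed
print(cached_parse("product.tags.0"))  # served from the cache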
liquid/expressions/boolean/parse.py (15 additions, 4 deletions)
@@ -1,4 +1,5 @@
"""Functions for parsing boolean expressions."""

from typing import Callable
from typing import Dict

@@ -188,9 +189,15 @@ def parse_obj(
TOKEN_MAP[TOKEN_LPAREN] = parse_range


-def parse(expr: str, linenum: int = 1) -> BooleanExpression:
+def parse(
+    expr: str, linenum: int = 1, *, shorthand_indexes: bool = False
+) -> BooleanExpression:
    """Parse a string as a "standard" boolean expression."""
-    return BooleanExpression(parse_obj(TokenStream(tokenize(expr, linenum))))
+    return BooleanExpression(
+        parse_obj(
+            TokenStream(tokenize(expr, linenum), shorthand_indexes=shorthand_indexes)
+        )
+    )


def parse_grouped_expression(stream: TokenStream) -> Expression:
@@ -261,13 +268,17 @@ def parse_obj_with_parens(
    return left


-def parse_with_parens(expr: str, linenum: int = 1) -> BooleanExpression:
+def parse_with_parens(
+    expr: str, linenum: int = 1, *, shorthand_indexes: bool = False
+) -> BooleanExpression:
    """Parse a string as a boolean expression.

    This function handles expressions containing the logical `not` operator and
    parentheses for grouping terms.
    """
-    stream = TokenStream(tokenize_with_parens(expr, linenum))
+    stream = TokenStream(
+        tokenize_with_parens(expr, linenum), shorthand_indexes=shorthand_indexes
+    )
    rv = BooleanExpression(parse_obj_with_parens(stream))
    peek_typ = stream.peek[1]
    if peek_typ == TOKEN_RPAREN:
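Both entry points now take `shorthand_indexes` as a keyword-only argument and simply forward it to the `TokenStream`. In template terms, that allows a bare index inside a boolean expression. A short, hypothetical sketch, again assuming the option is enabled by subclassing `Environment` as in the earlier example:

from liquid import Environment


class ShorthandEnvironment(Environment):  # assumed enabling mechanism, as above
    shorthand_indexes = True


env = ShorthandEnvironment()
template = env.from_string("{% if tags.0 == 'sale' %}On sale!{% endif %}")
print(template.render(tags=["sale", "new"]))  # -> "On sale!"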
liquid/expressions/common.py (29 additions, 6 deletions)
@@ -1,4 +1,5 @@
"""Patterns and parse functions common to multiple built-in expression types."""

from __future__ import annotations

import re
@@ -145,10 +146,17 @@ def parse_identifier(stream: "TokenStream") -> Identifier:
            stream.next_token()
            stream.expect(TOKEN_RBRACKET)
        elif typ == TOKEN_FLOAT:
-            raise LiquidSyntaxError(
-                f"expected an identifier, found {val!r}",
-                linenum=pos,
-            )
+            if stream.shorthand_indexes:
+                path.extend(
+                    IdentifierPathElement(to_int(i)) for i in val.rstrip(".").split(".")
+                )
+            else:
+                raise LiquidSyntaxError(
+                    f"expected an identifier, found {val!r}",
+                    linenum=pos,
+                )
+        elif typ == TOKEN_INTEGER and stream.shorthand_indexes:
+            path.append(IdentifierPathElement(to_int(val)))
        elif typ == TOKEN_DOT:
            pass
        else:
@@ -194,7 +202,7 @@ def parse_unchained_identifier(stream: "TokenStream") -> Identifier:


def make_parse_range(
-    parse_obj: Callable[["TokenStream"], Expression]
+    parse_obj: Callable[["TokenStream"], Expression],
) -> Callable[["TokenStream"], RangeLiteral]:
    """Return a function that parses range expressions using _parse_obj_."""

@@ -246,6 +254,8 @@ def _parse_range_literal(stream: "TokenStream") -> RangeLiteral:
        TOKEN_IDENTINDEX,
        TOKEN_DOT,
        TOKEN_LBRACKET,
+        TOKEN_INTEGER,
+        TOKEN_FLOAT,
    )
)

@@ -259,7 +269,7 @@ def _parse_common_identifier(stream: "TokenStream") -> Identifier:
    path: IdentifierPath = []

    while True:
-        _, _type, value = stream.current
+        pos, _type, value = stream.current
        if _type == TOKEN_IDENTIFIER:
            path.append(IdentifierPathElement(value))
        elif _type == TOKEN_IDENTINDEX:
@@ -269,6 +279,19 @@ def _parse_common_identifier(stream: "TokenStream") -> Identifier:
            path.append(_parse_common_identifier(stream))
            stream.next_token()
            stream.expect(TOKEN_RBRACKET)
+        elif _type == TOKEN_FLOAT:
+            if stream.shorthand_indexes:
+                path.extend(
+                    IdentifierPathElement(to_int(i))
+                    for i in value.rstrip(".").split(".")
+                )
+            else:
+                raise LiquidSyntaxError(
+                    f"expected an identifier, found {value!r}",
+                    linenum=pos,
+                )
+        elif _type == TOKEN_INTEGER and stream.shorthand_indexes:
+            path.append(IdentifierPathElement(to_int(value)))
        elif _type == TOKEN_DOT:
            pass

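The `TOKEN_FLOAT` branches above exist because, with shorthand indexes, consecutive bare indexes such as `a.1.2` reach the parser with `1.2` lexed as a single float token, and a form like `a.1.first` presumably leaves a trailing dot (`1.`). The `rstrip(".").split(".")` step recovers the individual integer indexes. A standalone sketch of just that step, assuming `to_int` behaves like `int` for these strings:

def float_token_to_indexes(value: str) -> list[int]:
    """Split a float-looking token such as '1.2' or '3.' into integer indexes."""
    return [int(part) for part in value.rstrip(".").split(".")]


assert float_token_to_indexes("1.2") == [1, 2]  # a.1.2   -> elements 1, then 2
assert float_token_to_indexes("3.") == [3]      # a.3.foo -> element 3, then "foo"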
liquid/expressions/conditional/parse.py (26 additions, 11 deletions)
@@ -1,4 +1,5 @@
"""Functions for parsing non-standard conditional expressions."""

from functools import partial
from typing import Dict
from typing import Iterable
@@ -65,13 +66,15 @@ def _split_at_first(
split_at_first_else = partial(_split_at_first, _type=TOKEN_ELSE)


-def _parse_filter(tokens: List[Token], linenum: int) -> Filter:
+def _parse_filter(
+    tokens: List[Token], linenum: int, *, shorthand_indexes: bool = False
+) -> Filter:
    if not tokens:
        raise LiquidSyntaxError(
            "unexpected pipe or missing filter name", linenum=linenum
        )

-    stream = TokenStream(iter(tokens))
+    stream = TokenStream(iter(tokens), shorthand_indexes=shorthand_indexes)
    stream.expect(TOKEN_IDENTIFIER)
    name = stream.current[2]
@@ -108,13 +111,17 @@ def _parse_filter(tokens: List[Token], linenum: int) -> Filter:
    return Filter(name, args, kwargs)


-def parse(expr: str, linenum: int = 1) -> FilteredExpression:
+def parse(
+    expr: str, linenum: int = 1, *, shorthand_indexes: bool = False
+) -> FilteredExpression:
    """Parse a conditional expression string."""
    tokens = tokenize(expr, linenum)
    standard_tokens, _conditional_tokens = split_at_first_if(tokens)

    # This expression includes filters.
-    _expr = parse_standard_filtered(iter(standard_tokens), linenum)
+    _expr = parse_standard_filtered(
+        iter(standard_tokens), linenum, shorthand_indexes=shorthand_indexes
+    )

    if not _conditional_tokens:
        # A standard filtered expression
@@ -126,7 +133,10 @@ def parse(expr: str, linenum: int = 1) -> FilteredExpression:
    )

    if conditional_tokens:
-        condition = parse_boolean_obj(TokenStream(iter(conditional_tokens)), linenum)
+        condition = parse_boolean_obj(
+            TokenStream(iter(conditional_tokens), shorthand_indexes=shorthand_indexes),
+            linenum,
+        )

    else:
        # A missing condition (an `if` with nothing after it).
@@ -138,7 +148,7 @@ def parse(expr: str, linenum: int = 1) -> FilteredExpression:

    if alternative_tokens:
        alternative: Optional[Expression] = parse_standard_filtered(
-            iter(alternative_tokens), linenum
+            iter(alternative_tokens), linenum, shorthand_indexes=shorthand_indexes
        )
    else:
        alternative = None
@@ -156,7 +166,9 @@ def parse(expr: str, linenum: int = 1) -> FilteredExpression:
    return ConditionalExpression(_expr, tail_filters, condition, alternative)


-def parse_with_parens(expr: str, linenum: int = 1) -> FilteredExpression:
+def parse_with_parens(
+    expr: str, linenum: int = 1, shorthand_indexes: bool = False
+) -> FilteredExpression:
    """Parse a conditional expression string.

    This parse function handles logical `not` and grouping terms with parentheses.
@@ -165,7 +177,9 @@ def parse_with_parens(expr: str, linenum: int = 1) -> FilteredExpression:
    standard_tokens, _conditional_tokens = split_at_first_if(tokens)

    # This expression includes filters.
-    _expr = parse_standard_filtered(iter(standard_tokens), linenum)
+    _expr = parse_standard_filtered(
+        iter(standard_tokens), linenum, shorthand_indexes=shorthand_indexes
+    )

    if not _conditional_tokens:
        # A standard filtered expression
@@ -178,7 +192,8 @@ def parse_with_parens(expr: str, linenum: int = 1) -> FilteredExpression:

    if conditional_tokens:
        condition = parse_boolean_obj_with_parens(
-            TokenStream(iter(conditional_tokens)), linenum
+            TokenStream(iter(conditional_tokens), shorthand_indexes=shorthand_indexes),
+            linenum,
        )
    else:
        # A missing condition (an `if` with nothing after it).
@@ -190,7 +205,7 @@ def parse_with_parens(expr: str, linenum: int = 1) -> FilteredExpression:

    if alternative_tokens:
        alternative: Optional[Expression] = parse_standard_filtered(
-            iter(alternative_tokens), linenum
+            iter(alternative_tokens), linenum, shorthand_indexes=shorthand_indexes
        )
    else:
        alternative = None
@@ -199,7 +214,7 @@ def parse_with_parens(expr: str, linenum: int = 1) -> FilteredExpression:

    if _filter_tokens:
        tail_filters = [
-            _parse_filter(_tokens, linenum)
+            _parse_filter(_tokens, linenum, shorthand_indexes=shorthand_indexes)
            for _tokens in split_at_pipe(iter(_filter_tokens))
        ]
    else:
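Threading the flag through `_parse_filter` and both `parse` functions means paths with bare indexes are accepted in non-standard conditional expressions and in their filter arguments too. A hedged sketch calling the changed `parse` function directly, using the signature and module path shown in this diff (the printed representation of the resulting expression is not documented here):

from liquid.expressions.conditional.parse import parse

# With shorthand_indexes=True, `users.0.name` is accepted as a variable path.
expr = parse("users.0.name if users else 'anonymous'", shorthand_indexes=True)
print(expr)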