Will McGugan
2021-09-29 16:22:55 +01:00
parent 0b34b18477
commit d0b14a351c
12 changed files with 417 additions and 9 deletions


@@ -32,50 +32,52 @@ class Box:
) -> "RenderResult":
width = options.max_width
top, right, bottom, left = self.sides
top, right, bottom, left = (
side if side != "none" else "" for side in self.sides
)
top_style, right_style, bottom_style, left_style = map(
console.get_style, self.styles
)
BOX = BOX_STYLES
renderable = self.renderable
render_width = width - (left != "none") - (right != "none")
render_width = width - bool(left) - bool(right)
lines = console.render_lines(renderable, options.update_width(render_width))
new_line = Segment.line()
if top != "none":
char_left, char_mid, char_right = iter(BOX[top][0])
row = f"{char_left if left != 'none' else ''}{char_mid * render_width}{char_right if right != 'none' else ''}"
row = f"{char_left if left else ''}{char_mid * render_width}{char_right if right else ''}"
yield Segment(row, top_style)
yield new_line
if left == right == "none":
if not left and not right:
for line in lines:
yield from line
yield new_line
elif left != "none" and right != "none":
elif left and right:
left_segment = Segment(BOX[left][1][0], left_style)
right_segment = Segment(BOX[right][1][2] + "\n", right_style)
for line in lines:
yield left_segment
yield from line
yield right_segment
elif left != "none":
elif left:
left_segment = Segment(BOX[left][1][0], left_style)
for line in lines:
yield left_segment
yield from line
yield new_line
elif right != "none":
elif right:
right_segment = Segment(BOX[right][1][2] + "\n", right_style)
for line in lines:
yield from line
yield right_segment
if bottom != "none":
if bottom:
char_left, char_mid, char_right = iter(BOX[bottom][2])
row = f"{char_left if left != 'none' else ''}{char_mid * render_width}{char_right if right != 'none' else ''}"
row = f"{char_left if left else ''}{char_mid * render_width}{char_right if right else ''}"
yield Segment(row, bottom_style)
yield new_line
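
The hunk above swaps repeated comparisons against the literal string "none" for plain truthiness checks: sides equal to "none" are normalized to "" once, after which bool(side) and the f-string conditions do the same work. A minimal sketch of the idea, with made-up side values rather than anything taken from the commit:

# Illustrative only: why normalizing "none" to "" lets bool() replace the
# old `side != "none"` arithmetic. The side values here are invented.
sides = ("heavy", "heavy", "heavy", "none")  # top, right, bottom, left

top, right, bottom, left = (side if side != "none" else "" for side in sides)

width = 20
# bool("") == 0 and bool("heavy") == 1, matching the previous
# `width - (left != "none") - (right != "none")` expression.
render_width = width - bool(left) - bool(right)
assert render_width == 19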

src/textual/css/Makefile (new file, 3 lines)

@@ -0,0 +1,3 @@
parser.py:
	python -m lark.tools.standalone css.g > parser.py
	black parser.py
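
The new Makefile regenerates parser.py from the css.g grammar with Lark's standalone code generator, then runs black over the output. As a loose, hedged illustration of how such a generated module is normally consumed (the module name parser, the grammar behaviour, and the sample input are assumptions; none of this is in the commit, and it relies on Lark's documented Lark_StandAlone entry point):

# Hypothetical usage of a parser module produced by
# `python -m lark.tools.standalone css.g > parser.py`.
# Assumes the generated parser.py is on the import path and that the
# css.g grammar accepts the sample rule below.
from parser import Lark_StandAlone

css_parser = Lark_StandAlone()
tree = css_parser.parse(".foo { color: blue }")
print(tree.pretty())  # prints the parse tree produced by the grammar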


src/textual/css/box.py (new file, 45 lines)

@@ -0,0 +1,45 @@
from dataclasses import dataclass, field


@dataclass(frozen=True)
class Space:
    top: int = 0
    right: int = 0
    bottom: int = 0
    left: int = 0

    def __str__(self) -> str:
        return f"{self.top}, {self.right}, {self.bottom}, {self.left}"


@dataclass(frozen=True)
class Edge:
    line: str = "none"
    style: str = "default"


@dataclass(frozen=True)
class Border:
    top: Edge = field(default_factory=Edge)
    right: Edge = field(default_factory=Edge)
    bottom: Edge = field(default_factory=Edge)
    left: Edge = field(default_factory=Edge)


@dataclass
class Box:
    padding: Space = field(default_factory=Space)
    margin: Space = field(default_factory=Space)
    border: Border = field(default_factory=Border)
    outline: Border = field(default_factory=Border)
    display: bool = True
    visible: bool = True
    text: str = ""
    opacity: float = 1.0


if __name__ == "__main__":
    from rich import print

    box = Box()
    print(box)
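
box.py introduces frozen value objects (Space, Edge, Border) and a mutable Box that aggregates them, all plain dataclasses with defaults. A small usage sketch against the definitions above (the import path follows the file's location; the values are invented):

# Constructing the dataclasses defined in src/textual/css/box.py.
from textual.css.box import Border, Box, Edge, Space

box = Box(
    padding=Space(top=1, right=2, bottom=1, left=2),
    border=Border(top=Edge(line="heavy", style="bold red")),
)
print(box.border.top.line)  # "heavy"
print(str(box.padding))     # "1, 2, 1, 2" via Space.__str__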


@@ -0,0 +1,3 @@
.foo {
    color: blue
}

src/textual/css/id.css (new file, 3 lines)

@@ -0,0 +1,3 @@
#foo {
    color: red
}

src/textual/css/model.py (new file, 43 lines)

@@ -0,0 +1,43 @@
from __future__ import annotations

from dataclasses import dataclass, field
from enum import Enum


class SelectorType(Enum):
    UNIVERSAL = 1
    TYPE = 2
    CLASS = 3
    ID = 4


class CombinatorType(Enum):
    SAME = 1
    DESCENDENT = 2
    CHILD = 3


@dataclass
class Location:
    line: tuple[int, int]
    column: tuple[int, int]


@dataclass
class RuleSet:
    selectors: list[list[Selector]] = field(default_factory=list)
    declarations: list[Declaration] = field(default_factory=list)


@dataclass
class Selector:
    name: str
    combinator: CombinatorType = CombinatorType.SAME
    selector: SelectorType = SelectorType.TYPE
    pseudo_classes: list[str] = field(default_factory=list)


@dataclass
class Declaration:
    name: str
    tokens: list[tuple[str, str]] = field(default_factory=list)
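
model.py is the object model the parser produces: a RuleSet holds one or more selector chains plus a list of declarations, and each Selector records its type and how it combines with the previous one. A hand-built sketch of roughly what a rule such as "#foo .bar" maps to (assembled by hand here, not captured from the parser; the import path follows the file's location):

# Hand-assembled model for something like "#foo .bar { color: red }".
from textual.css.model import (
    CombinatorType,
    Declaration,
    RuleSet,
    Selector,
    SelectorType,
)

rule_set = RuleSet(
    selectors=[
        [
            Selector(name="foo", selector=SelectorType.ID),
            Selector(
                name="bar",
                selector=SelectorType.CLASS,
                combinator=CombinatorType.DESCENDENT,
            ),
        ]
    ],
    declarations=[Declaration(name="color", tokens=[("token", "red")])],
)
print(rule_set)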

src/textual/css/parse.py (new file, 106 lines)

@@ -0,0 +1,106 @@
from __future__ import annotations

from rich import print

from typing import Callable, Iterator, Iterable

from .tokenize import tokenize, Token
from .model import Declaration, RuleSet, Selector, CombinatorType, SelectorType

SELECTOR_MAP = {
    "selector": SelectorType.TYPE,
    "selector_start": SelectorType.TYPE,
    "selector_class": SelectorType.CLASS,
    "selector_start_class": SelectorType.CLASS,
    "selector_id": SelectorType.ID,
    "selector_start_id": SelectorType.ID,
    "selector_universal": SelectorType.UNIVERSAL,
    "selector_start_universal": SelectorType.UNIVERSAL,
}


def parse_rule_set(tokens: Iterator[Token], token: Token) -> Iterable[RuleSet]:
    rule_set = RuleSet()
    get_selector = SELECTOR_MAP.get
    combinator = CombinatorType.SAME
    selectors: list[Selector] = []

    while True:
        if token.name == "pseudo_class":
            selectors[-1].pseudo_classes.append(token.value.lstrip(":"))
        elif token.name == "whitespace":
            if combinator == CombinatorType.SAME:
                combinator = CombinatorType.DESCENDENT
        elif token.name == "new_selector":
            rule_set.selectors.append(selectors[:])
            selectors.clear()
            combinator = CombinatorType.SAME
        elif token.name == "declaration_set_start":
            break
        else:
            selectors.append(
                Selector(
                    name=token.value.lstrip(".#"),
                    combinator=combinator,
                    selector=get_selector(token.name, SelectorType.TYPE),
                )
            )
            combinator = CombinatorType.SAME
        token = next(tokens)

    if selectors:
        rule_set.selectors.append(selectors[:])

    declaration = Declaration("")
    while True:
        token = next(tokens)
        token_name = token.name
        if token_name in ("whitespace", "declaration_end"):
            continue
        if token_name == "declaration_name":
            if declaration.tokens:
                rule_set.declarations.append(declaration)
            declaration = Declaration("")
            declaration.name = token.value.rstrip(":")
        elif token_name == "declaration_set_end":
            break
        else:
            declaration.tokens.append((token_name, token.value))

    if declaration.tokens:
        rule_set.declarations.append(declaration)

    yield rule_set


def parse(css: str) -> Iterable[RuleSet]:
    tokens = iter(tokenize(css))
    while True:
        token = next(tokens)
        if token.name.startswith("selector_start_"):
            yield from parse_rule_set(tokens, token)


if __name__ == "__main__":
    test = """
.foo.bar baz:focus, #egg {
    color: rgb(10,20,30);
    border: bold green;
}

* {
    outline: red
}
"""
    for obj in parse(test):
        print(obj)
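
parse() is a generator: it skips tokens until a selector_start_* token arrives, then hands the stream to parse_rule_set, which collects Selector chains until the opening brace and then (token name, value) pairs for each declaration. As reconstructed here there is no explicit end-of-input handling, so iteration appears to finish by propagating the tokenizer's EOFError; the hedged sketch below (not part of the commit) simply treats that as normal termination. Import paths follow the file locations:

# Hedged usage sketch for parse(); EOFError is the module's own exception
# from tokenizer.py, not the builtin of the same name.
from textual.css.parse import parse
from textual.css.tokenizer import EOFError

CSS = """
#foo .bar {
    border: double red
}
"""

rule_sets = []
try:
    for rule_set in parse(CSS):
        rule_sets.append(rule_set)
except EOFError:
    pass  # token stream exhausted

for rule_set in rule_sets:
    print(rule_set.selectors)
    print(rule_set.declarations)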

src/textual/css/rules.py (new, empty file)


@@ -0,0 +1,5 @@
#foo .bar {
    border: double red;
    color: blue;
    text: italic white on black;
}

src/textual/css/tokenize.py (new file, 102 lines)

@@ -0,0 +1,102 @@
from __future__ import annotations

import re
from typing import Iterable

from rich import print

from .tokenizer import Expect, Tokenizer, Token


expect_selector = Expect(
    whitespace=r"\s+",
    comment_start=r"\/\*",
    selector_start_id=r"\#[a-zA-Z_\-]+",
    selector_start_class=r"\.[a-zA-Z_\-]+",
    selector_start_universal=r"\*",
    selector_start=r"[a-zA-Z_\-]+",
)

expect_comment_end = Expect(
    comment_end=re.escape("*/"),
)

expect_selector_continue = Expect(
    whitespace=r"\s+",
    comment_start=r"\/\*",
    pseudo_class=r"\:[a-zA-Z_-]+",
    selector_id=r"\#[a-zA-Z_\-]+",
    selector_class=r"\.[a-zA-Z_\-]+",
    selector_universal=r"\*",
    selector=r"[a-zA-Z_\-]+",
    combinator_child=">",
    new_selector=r",",
    declaration_set_start=r"\{",
)

expect_declaration = Expect(
    whitespace=r"\s+",
    comment_start=r"\/\*",
    declaration_name=r"[a-zA-Z_\-]+\:",
    declaration_set_end=r"\}",
)

expect_declaration_content = Expect(
    declaration_end=r"\n|;",
    whitespace=r"\s+",
    comment_start=r"\/\*",
    percentage=r"\d+\%",
    number=r"\d+\.?\d+",
    color=r"\#([0-9a-f]{6})|color\([0-9]{1,3}\)|rgb\([\d\s,]+\)",
    token="[a-zA-Z_-]+",
    string='".*?"',
    declaration_set_end=r"\}",
)

_STATES = {
    "selector_start": expect_selector_continue,
    "selector_start_id": expect_selector_continue,
    "selector_start_class": expect_selector_continue,
    "selector_start_universal": expect_selector_continue,
    "selector_id": expect_selector_continue,
    "selector_class": expect_selector_continue,
    "selector_universal": expect_selector_continue,
    "declaration_set_start": expect_declaration,
    "declaration_name": expect_declaration_content,
    "declaration_end": expect_declaration,
    "declaration_set_end": expect_selector,
}


def tokenize(code: str) -> Iterable[Token]:
    tokenizer = Tokenizer(code)
    expect = expect_selector
    get_token = tokenizer.get_token
    get_state = _STATES.get

    while True:
        token = get_token(expect)
        name = token.name
        if name == "comment_start":
            tokenizer.skip_to(expect_comment_end)
        expect = get_state(name, expect)
        yield token


if __name__ == "__main__":
    test = """
foo:hover { bar: baz }

/* foo */

.foo.bar baz, fob {
    color: red "hello world" 20% 3.14;
    background-color: red
}

* {
    outline: bold
}
"""
    for token in tokenize(test):
        print(token)
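
tokenize() drives a small state machine: each Expect bundles the named regexes that are legal in one state, and _STATES maps the token just emitted to the Expect used for the next match (comments are skipped separately via skip_to). A hedged sketch of running it over a one-rule stylesheet; the commented "expected shape" is an approximation, so run it rather than trusting the comment:

# Feed a tiny stylesheet through the state machine and stop at the
# closing brace (avoids reading past the end of input).
from textual.css.tokenize import tokenize

for token in tokenize(".foo { color: red }\n"):
    print(token.name, repr(token.value))
    if token.name == "declaration_set_end":
        break
# Roughly: selector_start_class ".foo", whitespace, declaration_set_start "{",
# whitespace, declaration_name "color:", whitespace, token "red",
# whitespace, declaration_set_end "}".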

src/textual/css/tokenizer.py (new file, 96 lines)

@@ -0,0 +1,96 @@
from __future__ import annotations

from typing import NamedTuple
import re

from rich import print
import rich.repr


class EOFError(Exception):
    pass


class TokenizeError(Exception):
    def __init__(self, col_no: int, row_no: int, message: str) -> None:
        self.col_no = col_no
        self.row_no = row_no
        super().__init__(message)


class Expect:
    def __init__(self, **tokens: str) -> None:
        self.names = list(tokens.keys())
        self.regexes = list(tokens.values())
        self._regex = re.compile(
            "("
            + "|".join(f"(?P<{name}>{regex})" for name, regex in tokens.items())
            + ")"
        )
        self.match = self._regex.match
        self.search = self._regex.search

    def __rich_repr__(self) -> rich.repr.Result:
        yield from zip(self.names, self.regexes)


class Token(NamedTuple):
    line: int
    col: int
    name: str
    value: str


class Tokenizer:
    def __init__(self, text: str) -> None:
        self.lines = text.splitlines(keepends=True)
        self.line_no = 0
        self.col_no = 0

    def get_token(self, expect: Expect) -> Token:
        line_no = self.line_no
        if line_no >= len(self.lines):
            raise EOFError()
        col_no = self.col_no
        line = self.lines[line_no]
        match = expect.match(line, col_no)
        if match is None:
            raise TokenizeError(
                line_no,
                col_no,
                "expected " + ", ".join(name.upper() for name in expect.names),
            )
        iter_groups = iter(match.groups())
        next(iter_groups)
        for name, value in zip(expect.names, iter_groups):
            if value is not None:
                break
        try:
            return Token(line_no, col_no, name, value)
        finally:
            col_no += len(value)
            if col_no >= len(line):
                line_no += 1
                col_no = 0
            self.line_no = line_no
            self.col_no = col_no

    def skip_to(self, expect: Expect) -> Token:
        line_no = self.line_no
        col_no = self.col_no

        while True:
            if line_no >= len(self.lines):
                raise EOFError()
            line = self.lines[line_no]
            match = expect.search(line, col_no)
            if match is None:
                line_no += 1
                col_no = 0
            else:
                self.line_no = line_no
                self.col_no = match.span(0)[0]
                return self.get_token(expect)
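
tokenizer.py is the generic machinery underneath: Expect compiles its keyword arguments into one alternation of named groups, Tokenizer.get_token matches that pattern at the current line and column and works out which alternative fired, and skip_to searches forward (used above to jump over comments). A hedged sketch using the classes directly with a toy token set that is not part of the commit:

# Toy Expect/Tokenizer usage; the patterns here are invented, not the CSS
# ones defined in tokenize.py. EOFError is the module's own exception.
from textual.css.tokenizer import EOFError, Expect, Tokenizer

expect_word_or_number = Expect(
    whitespace=r"\s+",
    number=r"\d+",
    word=r"[a-z]+",
)

tokenizer = Tokenizer("abc 123\n")
try:
    while True:
        token = tokenizer.get_token(expect_word_or_number)
        print(token)  # Token(line=0, col=..., name=..., value=...)
except EOFError:
    pass  # no more lines to read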