import dataclasses
from typing import TYPE_CHECKING

from debputy.lsp.lsp_features import SEMANTIC_TOKENS_LEGEND
from debputy.util import grouper

if TYPE_CHECKING:
    import lsprotocol.types as types
else:
    import debputy.lsprotocol.types as types


@dataclasses.dataclass(slots=True, frozen=True)
class ResolvedSemanticToken:
    """A semantic token with its absolute range and covered text resolved."""

    range: types.Range
    value: str
    token_name: str
    modifiers: frozenset[str] = frozenset()


def resolved_semantic_token(
    line_no: int,
    col_start: int,
    value: str,
    token_type: str,
    *,
    token_modifiers: frozenset[str] = frozenset(),
) -> ResolvedSemanticToken:
    """Construct a ResolvedSemanticToken for a token confined to a single line."""
    return ResolvedSemanticToken(
        types.Range(
            types.Position(
                line_no,
                col_start,
            ),
            types.Position(
                line_no,
                col_start + len(value),
            ),
        ),
        value,
        token_type,
        token_modifiers,
    )


def resolve_semantic_tokens(
    lines: list[str],
    token_result: types.SemanticTokens | None,
) -> list[ResolvedSemanticToken] | None:
    """Decode an LSP semantic token response into absolute, readable tokens."""
    if token_result is None:
        return None
    # Per the LSP specification, the payload is a flat list of integers where
    # each token occupies five slots:
    #   (delta line, delta start col, token length, token type, token modifiers)
    assert (len(token_result.data) % 5) == 0
    current_line = 0
    current_col = 0
    resolved_tokens: list[ResolvedSemanticToken] = []
    token_types = SEMANTIC_TOKENS_LEGEND.token_types
    for token_data in grouper(token_result.data, 5, incomplete="strict"):
        line_delta, col_start_delta, token_len, token_code, modifier_codes = token_data
        if line_delta:
            # The column delta is relative to the start of the line when the
            # token starts a new line; otherwise, it is relative to the start
            # of the previous token.
            current_col = 0
        current_line += line_delta
        current_col += col_start_delta
        assert (
            not modifier_codes
        ), "TODO: Modifiers not supported (no modifiers defined)"
        value = lines[current_line][current_col : current_col + token_len]
        resolved_tokens.append(
            resolved_semantic_token(
                current_line,
                current_col,
                value,
                token_types[token_code],
            ),
        )
    return resolved_tokens
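

def _example_usage() -> None:
    # A minimal sketch (not part of the original module) of how the resolver
    # decodes the LSP delta encoding. It assumes token type code 0 indexes a
    # valid entry in SEMANTIC_TOKENS_LEGEND.token_types; the real legend is
    # defined in debputy.lsp.lsp_features.
    doc_lines = [
        "Source: debputy",
        "Section: devel",
    ]
    # Two tokens, each encoded as (delta line, delta col, length, type, modifiers):
    # "Source" at line 0, col 0 and "Section" at line 1, col 0.
    raw = types.SemanticTokens(data=[0, 0, 6, 0, 0, 1, 0, 7, 0, 0])
    tokens = resolve_semantic_tokens(doc_lines, raw)
    assert tokens is not None
    assert tokens[0].value == "Source"
    assert tokens[1].value == "Section"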