# NOTE: leading line-number residue from a bad extraction removed (was a
# column of "1".."156" plus a stray "|", which made the module unparseable).
from typing import List, Optional, Union, TYPE_CHECKING
from collections.abc import Sequence, Iterable
from debputy.commands.debputy_cmd.output import MAN_URL_REWRITE
from debputy.lsprotocol.types import (
TextEdit,
Position,
Range,
WillSaveTextDocumentParams,
DocumentFormattingParams,
)
from debputy.linting.lint_util import LinterPositionCodec
try:
from debputy.lsp.vendoring._deb822_repro.locatable import (
Position as TEPosition,
Range as TERange,
)
from debputy.lsp.debputy_ls import DebputyLanguageServer
except ImportError:
pass
try:
from pygls.server import LanguageServer
from pygls.workspace import TextDocument, PositionCodec
except ImportError:
pass
if TYPE_CHECKING:
LintCapablePositionCodec = Union[LinterPositionCodec, PositionCodec]
else:
LintCapablePositionCodec = LinterPositionCodec
def markdown_urlify(uri: str) -> str:
    """Render *uri* as a markdown link or auto-link.

    ``man:`` URIs that match ``MAN_URL_REWRITE`` become markdown links
    pointing at manpages.debian.org; ``man:`` URIs that do not match are
    returned verbatim.  Every other URI is wrapped as a ``<...>`` auto-link.
    """
    if not uri.startswith("man:"):
        return f"<{uri}>"
    match = MAN_URL_REWRITE.match(uri)
    if match is None:
        # Unparsable man page reference; emit it as plain text.
        return uri
    page, section = match.groups()
    link_url = f"https://manpages.debian.org/{page}.{section}"
    return f"[{uri}]({link_url})"
def normalize_dctrl_field_name(f: str) -> str:
    """Strip a user-defined ``X[SBC]*-`` prefix from a deb822 field name.

    Field names such as ``XB-Package-Type`` or ``XSBC-Original-Maintainer``
    carry an ``X`` prefix followed by any mix of ``b``/``s``/``c`` markers
    (case-insensitive) and a dash; the prefix including the dash is removed.
    Any other field name is returned unchanged.

    :param f: The field name to normalize (may be empty).
    :return: *f* without its ``X[SBC]*-`` prefix, or *f* unchanged when no
        such prefix is present.
    """
    if not f or not f.startswith(("x", "X")):
        return f
    for i in range(1, len(f)):
        c = f[i]
        if c == "-":
            # Found the end of the prefix; strip it including the dash.
            return f[i + 1 :]
        if c not in ("b", "B", "s", "S", "c", "C"):
            # Not an X[SBC]*- prefix after all (e.g. "Xanadu").
            return f
    # Fix: field names consisting only of prefix characters and no dash
    # ("X", "XB", ...) previously crashed on `assert i > 0` or stripped
    # characters incorrectly; without a dash there is nothing to remove.
    return f
def on_save_trim_end_of_line_whitespace(
    ls: "LanguageServer",
    params: WillSaveTextDocumentParams | DocumentFormattingParams,
) -> Sequence[TextEdit] | None:
    """Produce trailing-whitespace-trimming edits for the document in *params*.

    Resolves the text document from the language server's workspace and
    delegates to :func:`trim_end_of_line_whitespace` over all of its lines.
    """
    text_doc = ls.workspace.get_text_document(params.text_document.uri)
    return trim_end_of_line_whitespace(text_doc.position_codec, text_doc.lines)
def trim_end_of_line_whitespace(
    position_codec: "LintCapablePositionCodec",
    lines: list[str],
    *,
    line_range: Iterable[int] | None = None,
    line_relative_line_no: int = 0,
) -> Sequence[TextEdit] | None:
    """Build edits that delete trailing whitespace on the given lines.

    :param position_codec: Codec used to translate string offsets into
        client (UTF-16/UTF-32) column units and ranges.
    :param lines: The document content, one entry per line (entries may
        keep their trailing ``"\\n"``).
    :param line_range: Line indices to inspect; all lines when ``None``.
    :param line_relative_line_no: Offset added to each line index when
        emitting edit positions (for edits relative to a sub-document).
    :return: A (possibly empty) list of deletions covering the trailing
        whitespace of each affected line.
    """
    if line_range is None:
        line_range = range(0, len(lines))
    edits: list[TextEdit] = []
    for line_no in line_range:
        line = lines[line_no]
        # Length of the line's content, not counting the newline itself.
        content_len = len(line) - 1 if line.endswith("\n") else len(line)
        kept_len = len(line.rstrip())
        if kept_len == content_len:
            continue
        start_col = position_codec.client_num_units(line[:kept_len])
        end_col = position_codec.client_num_units(line[:content_len])
        client_line_no = line_no + line_relative_line_no
        edit_range = position_codec.range_to_client_units(
            lines,
            Range(
                Position(client_line_no, start_col),
                Position(client_line_no, end_col),
            ),
        )
        edits.append(TextEdit(edit_range, ""))
    return edits
class SemanticTokensState:
    """Mutable accumulator for LSP semantic tokens in delta-encoded form.

    Tokens are appended to ``tokens`` as the standard five-integer tuples
    (line delta, column delta, length, token type, token modifiers), with
    line/column deltas computed relative to the previously emitted token.
    """

    __slots__ = ("ls", "doc", "lines", "tokens", "_previous_line", "_previous_col")

    def __init__(
        self,
        ls: "DebputyLanguageServer",
        doc: "TextDocument",
        lines: list[str],
        tokens: list[int],
    ) -> None:
        self.ls = ls
        self.doc = doc
        self.lines = lines
        self.tokens = tokens
        # Position of the most recently emitted token (deltas are
        # computed against these).
        self._previous_line = 0
        self._previous_col = 0

    def emit_token(
        self,
        start_pos: "Position",
        len_client_units: int,
        token_code: int,
        *,
        token_modifiers: int = 0,
    ) -> None:
        """Append one delta-encoded token starting at *start_pos*.

        :param start_pos: Absolute start position (client units assumed —
            callers convert before emitting).
        :param len_client_units: Token length in client units.
        :param token_code: Index into the negotiated token-type legend.
        :param token_modifiers: Bitmask of token modifiers (default: none).
        """
        line_delta = start_pos.line - self._previous_line
        # Column deltas restart from column 0 whenever the token moved
        # to a new line.
        base_col = 0 if line_delta else self._previous_col
        column_delta = start_pos.character - base_col
        self._previous_line = start_pos.line
        self._previous_col = start_pos.character
        self.tokens.extend(
            (line_delta, column_delta, len_client_units, token_code, token_modifiers)
        )