Coverage for src/debputy/lsp/lsp_generic_deb822.py: 82%
412 statements
« prev ^ index » next coverage.py v7.8.2, created at 2025-10-12 15:06 +0000
« prev ^ index » next coverage.py v7.8.2, created at 2025-10-12 15:06 +0000
1import re
2from itertools import chain
3from typing import (
4 Optional,
5 Union,
6 Tuple,
7 Any,
8 List,
9 cast,
10 TYPE_CHECKING,
11)
12from collections.abc import Sequence, Container, Iterable, Iterator, Callable
14from debputy.linting.lint_util import (
15 LintState,
16 te_position_to_lsp,
17 with_range_in_continuous_parts,
18)
19from debputy.lsp.config.config_options import DCO_SPELLCHECK_COMMENTS
20from debputy.lsp.debputy_ls import DebputyLanguageServer
21from debputy.lsp.lsp_debian_control_reference_data import (
22 Deb822FileMetadata,
23 Deb822KnownField,
24 StanzaMetadata,
25 F,
26 S,
27 SUBSTVAR_RE,
28 _DEP_RELATION_CLAUSE,
29)
30from debputy.lsp.lsp_features import SEMANTIC_TOKEN_TYPES_IDS
31from debputy.lsp.quickfixes import propose_correct_text_quick_fix
32from debputy.lsp.text_util import (
33 trim_end_of_line_whitespace,
34 SemanticTokensState,
35)
36from debputy.lsp.vendoring._deb822_repro.locatable import (
37 START_POSITION,
38 Range as TERange,
39 Position as TEPosition,
40)
41from debputy.lsp.vendoring._deb822_repro.parsing import (
42 Deb822KeyValuePairElement,
43 Deb822ParagraphElement,
44 Deb822FileElement,
45 Deb822CommentElement,
46 Deb822ParsedTokenList,
47 Interpretation,
48)
49from debputy.lsp.vendoring._deb822_repro.tokens import tokenize_deb822_file, Deb822Token
50from debputy.lsp.vendoring._deb822_repro.types import TokenOrElement
51from debputy.lsprotocol.types import (
52 CompletionParams,
53 CompletionList,
54 CompletionItem,
55 Position,
56 MarkupContent,
57 Hover,
58 MarkupKind,
59 HoverParams,
60 FoldingRangeParams,
61 FoldingRange,
62 FoldingRangeKind,
63 SemanticTokensParams,
64 SemanticTokens,
65 TextEdit,
66 MessageType,
67 SemanticTokenTypes,
68)
69from debputy.util import _info, _warn
71if TYPE_CHECKING:
72 import lsprotocol.types as types
73else:
74 import debputy.lsprotocol.types as types
77try:
78 from pygls.server import LanguageServer
79 from pygls.workspace import TextDocument
80except ImportError:
81 pass
# Pre-compiled pattern matching a single whitespace character or a colon
# (characters that cannot occur inside a deb822 field name).
_CONTAINS_SPACE_OR_COLON = re.compile(r"[\s:]")
def in_range(
    te_range: TERange,
    cursor_position: Position,
    *,
    inclusive_end: bool = False,
) -> bool:
    """Determine whether a cursor position falls inside a text-element range.

    :param te_range: The range to test against.
    :param cursor_position: The cursor position (line + character).
    :param inclusive_end: When True, a cursor sitting exactly on the end
      column of a *single-line* range still counts as inside the range.
    :return: True if the cursor position is inside the range.
    """
    line = cursor_position.line
    column = cursor_position.character
    start = te_range.start_pos
    end = te_range.end_pos

    # Entirely outside the covered line span.
    if not (start.line_position <= line <= end.line_position):
        return False

    # Single-line range: a plain column comparison decides it.
    if start.line_position == end.line_position:
        if inclusive_end:
            return start.cursor_position <= column <= end.cursor_position
        return start.cursor_position <= column < end.cursor_position

    # Multi-line range with the cursor on the last line: only the end column
    # matters (exclusive here regardless of `inclusive_end`).
    if line == end.line_position:
        return column < end.cursor_position

    # Cursor strictly between first and last lines (always inside), or on the
    # first line where only the start column matters.
    return line > start.line_position or start.cursor_position <= column
def _field_at_position(
    stanza: Deb822ParagraphElement,
    stanza_metadata: S,
    stanza_range: TERange,
    position: Position,
) -> tuple[Deb822KeyValuePairElement | None, F | None, str, bool]:
    """Locate the field (and, when possible, the value token) under the cursor.

    :param stanza: The stanza the cursor is inside.
    :param stanza_metadata: Metadata describing the stanza's known fields.
    :param stanza_range: Absolute range of the stanza in the file.
    :param position: The cursor position.
    :return: ``(kvpair, known_field, matched_value, in_value)`` where
      `kvpair` is the key-value pair under the cursor (None when the cursor
      is not on one), `known_field` its metadata (None for unknown fields),
      `matched_value` is the individual value token under the cursor
      ("" when not determinable) and `in_value` is True when the cursor is
      inside the value part of the field.
    """
    # Track the absolute range of each part by chaining each part's size on
    # to the end of the previous part, starting from the stanza start.
    te_range = TERange(stanza_range.start_pos, stanza_range.start_pos)
    for token_or_element in stanza.iter_parts():
        te_range = token_or_element.size().relative_to(te_range.end_pos)
        if not in_range(te_range, position):
            continue
        if isinstance(token_or_element, Deb822KeyValuePairElement):
            value_range = token_or_element.value_element.range_in_parent().relative_to(
                te_range.start_pos
            )
            known_field = stanza_metadata.get(token_or_element.field_name)
            in_value = in_range(value_range, position)
            # Only known fields provide an interpretation of the value.
            interpreter = (
                known_field.field_value_class.interpreter()
                if known_field is not None
                else None
            )
            matched_value = ""
            if in_value and interpreter is not None:
                # Narrow down to the individual value token under the cursor.
                interpreted = token_or_element.interpret_as(interpreter)
                for value_ref in interpreted.iter_value_references():
                    value_token_range = (
                        value_ref.locatable.range_in_parent().relative_to(
                            value_range.start_pos
                        )
                    )
                    if in_range(value_token_range, position, inclusive_end=True):
                        matched_value = value_ref.value
                        break
            return token_or_element, known_field, matched_value, in_value
    return None, None, "", False
def _allow_stanza_continuation(
    token_or_element: TokenOrElement,
    is_completion: bool,
) -> bool:
    """Check whether a token after a stanza still "belongs" to that stanza.

    Used during completion so that the cursor on a fresh line just below a
    stanza still has stanza context. Errors and comments never end the
    stanza; whitespace ends it only when it contains a blank line (two or
    more newlines, i.e. a paragraph separator). Outside completion this is
    always False.
    """
    if not is_completion:
        return False
    if token_or_element.is_comment or token_or_element.is_error:
        return True
    if not token_or_element.is_whitespace:
        return False
    # A run with two (or more) newlines is a hard stanza separator.
    return token_or_element.convert_to_text().count("\n") < 2
def _at_cursor(
    deb822_file: Deb822FileElement,
    file_metadata: Deb822FileMetadata[S, F],
    doc: "TextDocument",
    lines: list[str],
    client_position: Position,
    is_completion: bool = False,
) -> tuple[
    Position,
    str | None,
    str,
    bool,
    S | None,
    F | None,
    Iterable[Deb822ParagraphElement],
]:
    """Resolve what the cursor is pointing at in a deb822 file.

    :param deb822_file: The parsed deb822 file.
    :param file_metadata: Metadata describing the file type.
    :param doc: The text document (used for position conversion).
    :param lines: The lines of the document.
    :param client_position: Cursor position in client (LSP) units.
    :param is_completion: When True, whitespace/comment/error tokens right
      after a stanza are treated as a continuation of that stanza (see
      `_allow_stanza_continuation`).
    :return: ``(server_position, field_name, word, in_value, stanza_metadata,
      known_field, stanzas)`` where `stanzas` is an iterable of the 0-2
      stanzas relevant to the cursor position.
    """
    server_position = doc.position_codec.position_from_client_units(
        lines,
        client_position,
    )
    te_range = TERange(
        START_POSITION,
        START_POSITION,
    )
    paragraph_no = -1
    previous_stanza: Deb822ParagraphElement | None = None
    next_stanza: Deb822ParagraphElement | None = None
    current_word = doc.word_at_position(client_position)
    in_value: bool = False
    file_iter = iter(deb822_file.iter_parts())
    matched_token: TokenOrElement | None = None
    matched_field: str | None = None
    stanza_metadata: S | None = None
    known_field: F | None = None

    # Walk the top-level parts, tracking absolute positions, until we reach
    # the part containing the cursor.
    for token_or_element in file_iter:
        te_range = token_or_element.size().relative_to(te_range.end_pos)
        if isinstance(token_or_element, Deb822ParagraphElement):
            previous_stanza = token_or_element
            paragraph_no += 1
        elif not _allow_stanza_continuation(token_or_element, is_completion):
            # Hard separator: the previous stanza no longer applies.
            previous_stanza = None
        if not in_range(te_range, server_position):
            continue
        matched_token = token_or_element
        if isinstance(token_or_element, Deb822ParagraphElement):
            stanza_metadata = file_metadata.guess_stanza_classification_by_idx(
                paragraph_no
            )
            kvpair, known_field, current_word, in_value = _field_at_position(
                token_or_element,
                stanza_metadata,
                te_range,
                server_position,
            )
            if kvpair is not None:
                matched_field = kvpair.field_name
        break

    # When the cursor sat on a "continuation" token (completion only), the
    # immediately following stanza (if any) is also relevant context.  Note
    # this relies on the `break` above leaving `file_iter` positioned right
    # after the matched token.
    if matched_token is not None and _allow_stanza_continuation(
        matched_token,
        is_completion,
    ):
        next_te = next(file_iter, None)
        if isinstance(next_te, Deb822ParagraphElement):
            next_stanza = next_te

    stanza_parts = (p for p in (previous_stanza, next_stanza) if p is not None)

    # For completion, provide stanza metadata even when the cursor is not
    # inside any stanza yet (e.g., starting a new stanza at EOF).
    if stanza_metadata is None and is_completion:
        if paragraph_no < 0:
            paragraph_no = 0
        stanza_metadata = file_metadata.guess_stanza_classification_by_idx(paragraph_no)
    return (
        server_position,
        matched_field,
        current_word,
        in_value,
        stanza_metadata,
        known_field,
        stanza_parts,
    )
def deb822_completer(
    ls: "DebputyLanguageServer",
    params: CompletionParams,
    file_metadata: Deb822FileMetadata[Any, Any],
) -> CompletionList | Sequence[CompletionItem] | None:
    """Provide completion items for a deb822-style file.

    Completes field values when the cursor is inside a value and field names
    otherwise. Returns None when the file metadata does not apply to the
    document or no completions can be offered.
    """
    doc = ls.workspace.get_text_document(params.text_document.uri)
    lines = doc.lines
    lint_state = ls.lint_state(doc)
    deb822_file = lint_state.parsed_deb822_file_content
    if not file_metadata.file_metadata_applies_to_file(deb822_file):
        return None

    (
        server_pos,
        current_field,
        word_at_position,
        in_value,
        stanza_metadata,
        known_field,
        matched_stanzas,
    ) = _at_cursor(
        deb822_file,
        file_metadata,
        doc,
        lines,
        params.position,
        is_completion=True,
    )

    # Never complete inside comment lines.
    if lines[server_pos.line].startswith("#"):
        return

    markdown_kind = ls.completion_item_document_markup(
        MarkupKind.Markdown, MarkupKind.PlainText
    )
    if not in_value:
        _info("Completing field name")
        assert stanza_metadata is not None
        return _complete_field_name(
            lint_state,
            stanza_metadata,
            matched_stanzas,
            markdown_kind,
        )

    _info(f"Completion for field value {current_field} -- {word_at_position}")
    if known_field is None:
        return None
    return known_field.value_options_for_completer(
        lint_state,
        list(matched_stanzas),
        word_at_position,
        markdown_kind,
    )
def deb822_hover(
    ls: "DebputyLanguageServer",
    params: HoverParams,
    file_metadata: Deb822FileMetadata[S, F],
    *,
    custom_handler: None | (
        Callable[
            [
                "DebputyLanguageServer",
                Position,
                str | None,
                str,
                F | None,
                bool,
                "TextDocument",
                list[str],
            ],
            Hover | None,
        ]
    ) = None,
) -> Hover | None:
    """Provide hover documentation for a deb822-style file.

    :param ls: The language server.
    :param params: The LSP hover request.
    :param file_metadata: Metadata describing the deb822 file type.
    :param custom_handler: Optional callback consulted first.  It may return
      a ready `Hover` (used as-is), a markdown string (wrapped below), or
      None (fall back to the field/value documentation).
    :return: The hover content, or None when there is nothing to show.
    """
    doc = ls.workspace.get_text_document(params.text_document.uri)
    deb822_file = ls.lint_state(doc).parsed_deb822_file_content
    if not file_metadata.file_metadata_applies_to_file(deb822_file):
        return None
    lines = doc.lines
    (
        server_pos,
        current_field,
        word_at_position,
        in_value,
        _,
        known_field,
        _,
    ) = _at_cursor(
        deb822_file,
        file_metadata,
        doc,
        lines,
        params.position,
    )

    # No hover for comment lines.
    if lines[server_pos.line].startswith("#"):
        return

    hover_text = None
    if custom_handler is not None:
        res = custom_handler(
            ls,
            server_pos,
            current_field,
            word_at_position,
            known_field,
            in_value,
            doc,
            lines,
        )
        if isinstance(res, Hover):
            return res
        hover_text = res

    if hover_text is None:
        if current_field is None:
            _info("No hover information as we cannot determine which field it is for")
            return None

        if known_field is None:
            return None
        if in_value:
            # Hover over a value: show the documentation for that keyword,
            # if the field has a closed set of known values.
            if not known_field.known_values:
                return None
            keyword = known_field.known_values.get(word_at_position)
            if keyword is None:
                return None
            hover_text = keyword.long_description_translated(ls)
            if hover_text is not None:
                header = "`{VALUE}` (Field: {FIELD_NAME})".format(
                    VALUE=keyword.value,
                    FIELD_NAME=known_field.name,
                )
                hover_text = f"# {header}\n\n{hover_text}"
        else:
            # Hover over the field name: show the field documentation.
            hover_text = known_field.long_description_translated(ls)
            if hover_text is None:
                hover_text = (
                    f"No documentation is available for the field {current_field}."
                )
            hover_text = f"# {known_field.name}\n\n{hover_text}"

    if hover_text is None:
        return None
    return Hover(
        contents=MarkupContent(
            kind=ls.hover_markup_format(MarkupKind.Markdown, MarkupKind.PlainText),
            value=hover_text,
        )
    )
def deb822_token_iter(
    tokens: "Iterable[Deb822Token]",
) -> "Iterator[tuple[Deb822Token, int, int, int, int]]":
    """Iterate tokens while tracking their (line, column) positions.

    :param tokens: The deb822 tokens to iterate.
    :return: An iterator of ``(token, start_line, start_col, end_line,
      end_col)`` tuples.  Lines and columns are 0-based and the end position
      is exclusive (the position immediately after the token).
    """
    line_no = 0
    line_offset = 0

    for token in tokens:
        start_line = line_no
        start_line_offset = line_offset

        newlines = token.text.count("\n")
        line_no += newlines
        text_len = len(token.text)
        if newlines:
            if token.text.endswith("\n"):
                line_offset = 0
            else:
                # End column is the number of characters after the last
                # newline, i.e. text_len - rindex("\n") - 1.  (Bug fix: the
                # previous "- 2" under-counted the column by one; the
                # no-newline branch below counts every consumed character,
                # and this branch must be consistent with it.)
                line_offset = text_len - token.text.rindex("\n") - 1
        else:
            line_offset += text_len

        yield token, start_line, start_line_offset, line_no, line_offset
def deb822_folding_ranges(
    ls: "DebputyLanguageServer",
    params: FoldingRangeParams,
    file_metadata: Deb822FileMetadata[Any, Any],
) -> Sequence[FoldingRange] | None:
    """Compute folding ranges for runs of consecutive comment lines.

    :param ls: The language server.
    :param params: The LSP folding range request.
    :param file_metadata: Metadata describing the deb822 file type.
    :return: The folding ranges, or None when the file metadata does not
      apply to the document.
    """
    doc = ls.workspace.get_text_document(params.text_document.uri)
    deb822_file = ls.lint_state(doc).parsed_deb822_file_content
    if not file_metadata.file_metadata_applies_to_file(deb822_file):
        return None
    comment_start = -1  # Start line of the current comment run (-1: none).
    last_comment_end_line = -1
    folding_ranges = []
    for (
        token,
        start_line,
        start_offset,
        end_line,
        end_offset,
    ) in deb822_token_iter(deb822_file.iter_tokens()):
        if token.is_comment:
            if comment_start < 0:
                comment_start = start_line
            last_comment_end_line = end_line
        elif comment_start > -1:
            # Bug fix: build the range *before* resetting `comment_start`.
            # Previously `comment_start = -1` ran first, so every emitted
            # folding range started at line -1.
            folding_range = FoldingRange(
                comment_start,
                end_line,
                kind=FoldingRangeKind.Comment,
            )
            folding_ranges.append(folding_range)
            comment_start = -1

    # Robustness: a comment run reaching EOF was previously never flushed
    # (the flush only happened on a following non-comment token).
    if comment_start > -1:
        folding_ranges.append(
            FoldingRange(
                comment_start,
                last_comment_end_line,
                kind=FoldingRangeKind.Comment,
            )
        )
    return folding_ranges
class Deb822SemanticTokensState(SemanticTokensState):
    """Semantic-token emission state specialized for deb822 files.

    Pre-resolves the semantic token type codes used by the deb822
    tokenizers so the emission loops avoid repeated dict lookups.
    """

    __slots__ = (
        "file_metadata",
        "keyword_token_code",
        "known_value_token_code",
        "comment_token_code",
        "substvars_token_code",
        "operator_token_code",
        "relation_restriction_token_code",
        "package_token_code",
        "allow_overlapping_tokens",
    )

    def __init__(
        self,
        ls: "DebputyLanguageServer",
        doc: "TextDocument",
        lines: list[str],
        tokens: list[int],
        file_metadata: Deb822FileMetadata[Any, Any],
    ) -> None:
        super().__init__(ls, doc, lines, tokens)
        # Metadata describing the deb822 file type being tokenized.
        self.file_metadata = file_metadata

        # Token type code for field names.
        self.keyword_token_code = SEMANTIC_TOKEN_TYPES_IDS[SemanticTokenTypes.Keyword]
        # Token type code for values from a known/closed value set.
        self.known_value_token_code = SEMANTIC_TOKEN_TYPES_IDS[
            SemanticTokenTypes.EnumMember
        ]
        # NOTE(review): this lookup uses `.value` while every other lookup
        # here uses the enum member directly — confirm whether the key type
        # of SEMANTIC_TOKEN_TYPES_IDS makes the inconsistency significant.
        self.comment_token_code = SEMANTIC_TOKEN_TYPES_IDS[
            SemanticTokenTypes.Comment.value
        ]
        # Token type code for substitution variables (${...}).
        self.substvars_token_code = SEMANTIC_TOKEN_TYPES_IDS[SemanticTokenTypes.Macro]
        # Token type code for version operators in dependency clauses.
        self.operator_token_code = SEMANTIC_TOKEN_TYPES_IDS[SemanticTokenTypes.Operator]
        # Token type code for arch/build-profile restrictions in clauses.
        self.relation_restriction_token_code = SEMANTIC_TOKEN_TYPES_IDS[
            SemanticTokenTypes.TypeParameter
        ]
        # Token type code for package names in dependency clauses.
        self.package_token_code = SEMANTIC_TOKEN_TYPES_IDS[SemanticTokenTypes.Variable]
def _emit_tokens_for_comment_element(
    sem_token_state: Deb822SemanticTokensState,
    comment_element: Deb822CommentElement,
    comment_start_line: int,
    comment_token_code: int,
) -> None:
    """Emit one comment semantic token per line of a comment element.

    :param sem_token_state: Accumulator for the emitted semantic tokens.
    :param comment_element: The comment element (one comment token per line).
    :param comment_start_line: Absolute line number of the first comment line.
    :param comment_token_code: Semantic token type code to emit.
    """
    line_no = comment_start_line
    for comment_token in comment_element.iter_parts():
        assert comment_token.is_comment
        assert isinstance(comment_token, Deb822Token)
        # Exclude trailing whitespace (notably the newline) from the token.
        sem_token_state.emit_token(
            Position(line_no, 0),
            len(comment_token.text.rstrip()),
            comment_token_code,
        )
        line_no += 1
async def scan_for_syntax_errors_and_token_level_diagnostics(
    deb822_file: Deb822FileElement,
    lint_state: LintState,
) -> int:
    """Emit diagnostics for tokenizer errors and (optionally) comment typos.

    :param deb822_file: The parsed deb822 file to scan.
    :param lint_state: Lint state used for emitting diagnostics, config
      lookups and spellchecking.
    :return: The line number of the first error token, or
      ``len(lint_state.lines) + 1`` when there are no error tokens.
    """
    first_error = len(lint_state.lines) + 1
    spell_checker = lint_state.spellchecker()

    async for (
        token,
        start_line,
        start_offset,
        end_line,
        end_offset,
    ) in lint_state.slow_iter(deb822_token_iter(deb822_file.iter_tokens())):
        if token.is_error:
            first_error = min(first_error, start_line)
            token_range = TERange(
                TEPosition(
                    start_line,
                    start_offset,
                ),
                TEPosition(
                    end_line,
                    end_offset,
                ),
            )
            lint_state.emit_diagnostic(
                token_range,
                "Syntax error",
                "error",
                "debputy",
            )
        elif token.is_comment:
            # Spellchecking of comments is opt-in via configuration.
            if not lint_state.debputy_config.config_value(DCO_SPELLCHECK_COMMENTS):
                continue
            for word, col_pos, end_col_pos in spell_checker.iter_words(token.text):
                corrections = spell_checker.provide_corrections_for(word)
                if not corrections:
                    continue
                # NOTE(review): assumes the whole comment token sits on
                # `start_line` (word columns are applied to that line) —
                # confirm comment tokens never span multiple lines.
                word_range = TERange.between(
                    TEPosition(
                        start_line,
                        col_pos,
                    ),
                    TEPosition(
                        start_line,
                        end_col_pos,
                    ),
                )
                lint_state.emit_diagnostic(
                    word_range,
                    f'Spelling "{word}"',
                    "spelling",
                    "debputy",
                    quickfixes=[propose_correct_text_quick_fix(c) for c in corrections],
                    enable_non_interactive_auto_fix=False,
                )
    return first_error
def _emit_relation_token(
    sem_token_state: Deb822SemanticTokensState,
    token_code: int | None,
    m: re.Match[str] | None,
    group_name: str,
    value_range_te: TERange,
) -> None:
    """Emit a semantic token for one named group of a dependency-clause match.

    :param sem_token_state: Accumulator for the emitted semantic tokens.
    :param token_code: Token type code for the group.  When None, no base
      token is emitted but substvars inside the group are still highlighted.
    :param m: The regex match of the dependency clause.  None (no match) is
      tolerated and results in nothing being emitted.
    :param group_name: Name of the group to emit a token for.
    :param value_range_te: Absolute range of the matched value.
    """
    # Robustness fix: the caller obtains `m` from `fullmatch`, which can
    # fail on malformed clauses; previously a None match would have raised
    # an AttributeError below.
    if m is None:
        return
    token_value = m.group(group_name)
    if token_value is None:
        # Optional group did not participate in the match.  (The previous
        # `m.start(...) is None` check was dead code: `Match.start` returns
        # -1, never None, for non-participating groups.)
        return
    token_start = m.start(group_name)
    pos = TEPosition(
        value_range_te.start_pos.line_position,
        value_range_te.start_pos.cursor_position + token_start,
    )
    end_pos = TEPosition(
        pos.line_position,
        pos.cursor_position + len(token_value),
    )

    _process_value_with_substvars(
        sem_token_state,
        token_value,
        TERange.between(pos, end_pos),
        token_code,
    )
async def _deb822_relationship_field_semantic_tokens_full(
    sem_token_state: Deb822SemanticTokensState,
    interpretation: Interpretation[Deb822ParsedTokenList[Any, Any]],
    kvpair: Deb822KeyValuePairElement,
    value_element_pos: TEPosition,
) -> None:
    """Emit semantic tokens for a relationship field (Depends and friends).

    Each dependency clause is matched against ``_DEP_RELATION_CLAUSE`` and
    its parts (package name, operator, version, restrictions) receive their
    own token types; comments are emitted as comment tokens.

    :param sem_token_state: Accumulator for the emitted semantic tokens.
    :param interpretation: Interpretation used to split the field value.
    :param kvpair: The key-value pair of the relationship field.
    :param value_element_pos: Absolute position of the value element.
    """
    doc = sem_token_state.doc
    parts = kvpair.interpret_as(interpretation).iter_parts()
    comment_token_code = sem_token_state.comment_token_code
    operator_token_code = sem_token_state.operator_token_code
    relation_restriction_token_code = sem_token_state.relation_restriction_token_code
    package_token_code = sem_token_state.package_token_code

    for te in parts:
        if te.is_whitespace:
            continue
        if te.is_separator:
            continue

        value_range_in_parent_te = te.range_in_parent()
        value_range_te = value_range_in_parent_te.relative_to(value_element_pos)
        value = te.convert_to_text()
        if te.is_comment:
            token_type = comment_token_code
            # Trailing whitespace/newline is not part of the comment token.
            value = value.rstrip()
            value_len = doc.position_codec.client_num_units(value)
            sem_token_state.emit_token(
                te_position_to_lsp(value_range_te.start_pos),
                value_len,
                token_type,
            )
        else:
            m = _DEP_RELATION_CLAUSE.fullmatch(value)
            # Robustness fix: skip clauses the regex cannot parse rather
            # than passing a None match on (which would previously have
            # raised an AttributeError in `_emit_relation_token`).
            if m is None:
                continue
            _emit_relation_token(
                sem_token_state,
                package_token_code,
                m,
                "name_arch_qual",
                value_range_te,
            )

            _emit_relation_token(
                sem_token_state,
                operator_token_code,
                m,
                "operator",
                value_range_te,
            )
            # The version gets no base token, but substvars inside it are
            # still highlighted.
            _emit_relation_token(
                sem_token_state,
                None,
                m,
                "version",
                value_range_te,
            )
            _emit_relation_token(
                sem_token_state,
                relation_restriction_token_code,
                m,
                "arch_restriction",
                value_range_te,
            )
            _emit_relation_token(
                sem_token_state,
                relation_restriction_token_code,
                m,
                "build_profile_restriction",
                value_range_te,
            )
async def _deb822_paragraph_semantic_tokens_full(
    ls: "DebputyLanguageServer",
    sem_token_state: Deb822SemanticTokensState,
    stanza: Deb822ParagraphElement,
    stanza_range_in_file: "TERange",
    stanza_idx: int,
) -> None:
    """Emit semantic tokens for a single stanza (paragraph).

    Field names become keyword tokens, comments become comment tokens, and
    known values / substvars in field values get their own token types.
    Relationship fields are delegated to
    `_deb822_relationship_field_semantic_tokens_full`.

    :param ls: The language server (used for cooperative yielding).
    :param sem_token_state: Accumulator for the emitted semantic tokens.
    :param stanza: The stanza to tokenize.
    :param stanza_range_in_file: Absolute range of the stanza in the file.
    :param stanza_idx: Index of the stanza (used for classification).
    """
    doc = sem_token_state.doc
    keyword_token_code = sem_token_state.keyword_token_code
    known_value_token_code = sem_token_state.known_value_token_code
    comment_token_code = sem_token_state.comment_token_code

    stanza_position = stanza_range_in_file.start_pos
    stanza_metadata = sem_token_state.file_metadata.classify_stanza(
        stanza,
        stanza_idx=stanza_idx,
    )
    # Yield to the event loop every 25 parts to keep the server responsive.
    async for kvpair_range, kvpair in ls.slow_iter(
        with_range_in_continuous_parts(
            stanza.iter_parts(),
            start_relative_to=stanza_position,
        ),
        yield_every=25,
    ):
        if not isinstance(kvpair, Deb822KeyValuePairElement):
            continue
        kvpair_position = kvpair_range.start_pos
        field_start = kvpair.field_token.position_in_parent().relative_to(
            kvpair_position
        )
        comment = kvpair.comment_element
        if comment:
            # The comment lines sit immediately above the field line.
            comment_start_line = field_start.line_position - len(comment)
            _emit_tokens_for_comment_element(
                sem_token_state,
                comment,
                comment_start_line,
                comment_token_code,
            )

        field_size = doc.position_codec.client_num_units(kvpair.field_name)

        sem_token_state.emit_token(
            te_position_to_lsp(field_start),
            field_size,
            keyword_token_code,
        )

        known_field: Deb822KnownField | None = stanza_metadata.get(kvpair.field_name)
        value_element_pos = kvpair.value_element.position_in_parent().relative_to(
            kvpair_position
        )
        if known_field is not None:
            if known_field.spellcheck_value:
                # Free-text (spellchecked) fields get no value tokens.
                continue
            interpretation = known_field.field_value_class.interpreter()
            if (
                getattr(known_field, "is_relationship_field", False)
                and interpretation is not None
            ):
                # Relationship fields get specialized highlighting.
                await _deb822_relationship_field_semantic_tokens_full(
                    sem_token_state,
                    interpretation,
                    kvpair,
                    value_element_pos,
                )
                continue
            known_values: Container[str] = known_field.known_values or frozenset()
            field_disallows_substvars = (
                known_field.is_substvars_disabled_even_if_allowed_by_stanza
            )
            allow_substvars = (
                stanza_metadata.is_substvars_allowed_in_stanza
                and not field_disallows_substvars
            )
        else:
            known_values = frozenset()
            interpretation = None
            allow_substvars = stanza_metadata.is_substvars_allowed_in_stanza

        if interpretation is None:
            # No interpretation available: only comment lines inside the
            # value can be highlighted.
            for value_line in kvpair.value_element.value_lines:
                comment_element = value_line.comment_element
                if comment_element:
                    assert comment_element.position_in_parent().line_position == 0
                    comment_start_line = (
                        value_line.position_in_parent()
                        .relative_to(value_element_pos)
                        .line_position
                    )
                    _emit_tokens_for_comment_element(
                        sem_token_state,
                        comment_element,
                        comment_start_line,
                        comment_token_code,
                    )
            continue
        else:
            parts = kvpair.interpret_as(interpretation).iter_parts()
            for te in parts:
                if te.is_whitespace:
                    continue
                if te.is_separator:
                    continue
                value_range_in_parent_te = te.range_in_parent()
                value_range_te = value_range_in_parent_te.relative_to(value_element_pos)
                value = te.convert_to_text()
                if te.is_comment:
                    token_type = comment_token_code
                    # Exclude trailing whitespace (incl. newline).
                    value = value.rstrip()
                elif value in known_values:
                    token_type = known_value_token_code
                elif allow_substvars and "${" in value:
                    # Highlight only the substvars inside the value.
                    _process_value_with_substvars(
                        sem_token_state,
                        value,
                        value_range_te,
                        None,
                    )
                    continue
                else:
                    continue
                value_len = doc.position_codec.client_num_units(value)
                sem_token_state.emit_token(
                    te_position_to_lsp(value_range_te.start_pos),
                    value_len,
                    token_type,
                )
820def _split_into_substvars(
821 value: str,
822 base_token_type: int | None,
823 substvar_token_type: int,
824) -> Iterable[tuple[str, int | None]]:
826 i = 0
827 next_search = i
828 full_value_len = len(value)
829 while i < full_value_len:
830 try:
831 subst_var_start = value.index("${", next_search)
832 subst_var_end = value.index("}", subst_var_start + 2)
833 except ValueError:
834 token = value[i:full_value_len]
835 if token: 835 ↛ 837line 835 didn't jump to line 837 because the condition on line 835 was always true
836 yield token, base_token_type
837 return
839 subst_var_end += 1
840 subst_var = value[subst_var_start:subst_var_end]
841 if subst_var != "${}" and not SUBSTVAR_RE.match(subst_var): 841 ↛ 842line 841 didn't jump to line 842 because the condition on line 841 was never true
842 subst_var = None
844 if subst_var is None: 844 ↛ 845line 844 didn't jump to line 845 because the condition on line 844 was never true
845 next_search = subst_var_end
846 continue
848 token = value[i:subst_var_start]
849 if token: 849 ↛ 850line 849 didn't jump to line 850 because the condition on line 849 was never true
850 yield token, base_token_type
851 yield subst_var, substvar_token_type
852 i = subst_var_end
853 next_search = i
def _process_value_with_substvars(
    sem_token_state: Deb822SemanticTokensState,
    value: str,
    value_range_te: "TERange",
    base_token_type: int | None,
) -> None:
    """Emit semantic tokens for a value, highlighting substvars separately.

    Splits `value` into plain segments (emitted with `base_token_type`, or
    skipped entirely when that is None) and substvar segments (emitted with
    the substvar token code). All segments are emitted on the start line of
    `value_range_te`.
    """
    # TODO: Support overlapping tokens if the editor does.
    codec = sem_token_state.doc.position_codec
    start_pos = value_range_te.start_pos
    line = start_pos.line_position
    column = start_pos.cursor_position
    segments = _split_into_substvars(
        value,
        base_token_type,
        sem_token_state.substvars_token_code,
    )
    for segment, segment_token_type in segments:
        if segment_token_type is not None:
            sem_token_state.emit_token(
                types.Position(line, column),
                codec.client_num_units(segment),
                segment_token_type,
            )
        column += len(segment)
def deb822_format_file(
    lint_state: LintState,
    file_metadata: Deb822FileMetadata[Any, Any],
) -> Sequence[TextEdit] | None:
    """Reformat a deb822 file according to the effective style preference.

    :param lint_state: Lint state providing the parsed file, content and
      style preference.
    :param file_metadata: Metadata describing the deb822 file type.
    :return: The edits needed to reformat the file; a plain trailing
      whitespace trim when no style preference is configured; None when the
      file metadata does not apply or the parsed content is unavailable.
    """
    deb822_file = lint_state.parsed_deb822_file_content
    if not file_metadata.file_metadata_applies_to_file(deb822_file):
        return None
    effective_preference = lint_state.effective_preference
    if effective_preference is None:
        # No configured style: only strip trailing whitespace.
        return trim_end_of_line_whitespace(lint_state.position_codec, lint_state.lines)
    formatter = effective_preference.deb822_formatter()
    lines = lint_state.lines
    # (Cleanup: removed a redundant second read of
    # `lint_state.parsed_deb822_file_content` that immediately followed.)
    if deb822_file is None:
        _warn("The deb822 result missing failed!?")
        return None

    return list(
        file_metadata.reformat(
            effective_preference,
            deb822_file,
            formatter,
            lint_state.content,
            lint_state.position_codec,
            lines,
        )
    )
async def deb822_semantic_tokens_full(
    ls: "DebputyLanguageServer",
    request: SemanticTokensParams,
    file_metadata: Deb822FileMetadata[Any, Any],
) -> SemanticTokens | None:
    """Compute the full semantic-token set for a deb822 document.

    :param ls: The language server.
    :param request: The LSP semantic-tokens request.
    :param file_metadata: Metadata describing the deb822 file type.
    :return: The encoded semantic tokens, or None when the file metadata
      does not apply, the parsed content is unavailable, or no tokens were
      produced.
    """
    doc = ls.workspace.get_text_document(request.text_document.uri)
    deb822_file = ls.lint_state(doc).parsed_deb822_file_content
    if not file_metadata.file_metadata_applies_to_file(deb822_file):
        return None
    position_codec = doc.position_codec
    lines = doc.lines
    if deb822_file is None:
        _warn("The deb822 result missing failed!?")
        ls.show_message_log(
            "Internal error; could not get deb822 content!?", MessageType.Warning
        )
        return None

    tokens: list[int] = []
    sem_token_state = Deb822SemanticTokensState(
        ls,
        doc,
        lines,
        tokens,
        file_metadata,
    )

    comment_token_code = sem_token_state.comment_token_code

    stanza_idx = 0

    # Yield to the event loop every 20 parts to keep the server responsive.
    async for part_range, part in ls.slow_iter(
        with_range_in_continuous_parts(deb822_file.iter_parts()), yield_every=20
    ):
        if part.is_comment:
            # Top-level (between-stanza) comments.
            pos = part_range.start_pos
            sem_token_state.emit_token(
                te_position_to_lsp(pos),
                # Avoid trailing newline
                position_codec.client_num_units(part.convert_to_text().rstrip()),
                comment_token_code,
            )
        elif isinstance(part, Deb822ParagraphElement):
            await _deb822_paragraph_semantic_tokens_full(
                ls,
                sem_token_state,
                part,
                part_range,
                stanza_idx,
            )
            stanza_idx += 1
    if not tokens:
        return None
    return SemanticTokens(tokens)
def _complete_field_name(
    lint_state: LintState,
    stanza_metadata: StanzaMetadata[Any],
    matched_stanzas: Iterable[Deb822ParagraphElement],
    markdown_kind: MarkupKind,
) -> Sequence[CompletionItem]:
    """Produce completion items for field names missing from the stanza(s).

    Fields already present in any of the matched stanzas are skipped (after
    case-folding and normalization), so only absent fields are offered.
    """
    stanzas = list(matched_stanzas)
    # The typing from python3-debian is not entirely optimal here. The iter
    # always returns a `str`, but the provided type is `ParagraphKey`
    # (because `__getitem__` supports those) and that is not exclusively a
    # `str`.  Hence the cast.
    present_fields = {
        stanza_metadata.normalize_field_name(field_name.lower())
        for stanza in stanzas
        for field_name in cast("Iterable[str]", stanza)
    }
    completions = []
    for cand_key, cand in stanza_metadata.items():
        if stanza_metadata.normalize_field_name(cand_key.lower()) in present_fields:
            continue
        item = cand.complete_field(lint_state, stanzas, markdown_kind)
        if item is not None:
            completions.append(item)
    return completions