Coverage for src/debputy/lsp/lsp_generic_deb822.py: 82%
411 statements
« prev ^ index » next — coverage.py v7.8.2, created at 2025-09-07 09:27 +0000
1import re
2from itertools import chain
3from typing import (
4 Optional,
5 Union,
6 Sequence,
7 Tuple,
8 Any,
9 Container,
10 List,
11 Iterable,
12 Iterator,
13 Callable,
14 cast,
15 TYPE_CHECKING,
16)
18from debputy.linting.lint_util import (
19 LintState,
20 te_position_to_lsp,
21 with_range_in_continuous_parts,
22)
23from debputy.lsp.config.config_options import DCO_SPELLCHECK_COMMENTS
24from debputy.lsp.debputy_ls import DebputyLanguageServer
25from debputy.lsp.lsp_debian_control_reference_data import (
26 Deb822FileMetadata,
27 Deb822KnownField,
28 StanzaMetadata,
29 F,
30 S,
31 SUBSTVAR_RE,
32 _DEP_RELATION_CLAUSE,
33)
34from debputy.lsp.lsp_features import SEMANTIC_TOKEN_TYPES_IDS
35from debputy.lsp.quickfixes import propose_correct_text_quick_fix
36from debputy.lsp.text_util import (
37 trim_end_of_line_whitespace,
38 SemanticTokensState,
39)
40from debputy.lsp.vendoring._deb822_repro.locatable import (
41 START_POSITION,
42 Range as TERange,
43 Position as TEPosition,
44)
45from debputy.lsp.vendoring._deb822_repro.parsing import (
46 Deb822KeyValuePairElement,
47 Deb822ParagraphElement,
48 Deb822FileElement,
49 Deb822CommentElement,
50 Deb822ParsedTokenList,
51 Interpretation,
52)
53from debputy.lsp.vendoring._deb822_repro.tokens import tokenize_deb822_file, Deb822Token
54from debputy.lsp.vendoring._deb822_repro.types import TokenOrElement
55from debputy.lsprotocol.types import (
56 CompletionParams,
57 CompletionList,
58 CompletionItem,
59 Position,
60 MarkupContent,
61 Hover,
62 MarkupKind,
63 HoverParams,
64 FoldingRangeParams,
65 FoldingRange,
66 FoldingRangeKind,
67 SemanticTokensParams,
68 SemanticTokens,
69 TextEdit,
70 MessageType,
71 SemanticTokenTypes,
72)
73from debputy.util import _info, _warn
75if TYPE_CHECKING:
76 import lsprotocol.types as types
77else:
78 import debputy.lsprotocol.types as types
81try:
82 from pygls.server import LanguageServer
83 from pygls.workspace import TextDocument
84except ImportError:
85 pass
# Matches any whitespace character or ":".  Not referenced in this part of the
# file; presumably used elsewhere to reject candidate field names — TODO confirm.
_CONTAINS_SPACE_OR_COLON = re.compile(r"[\s:]")
91def in_range(
92 te_range: TERange,
93 cursor_position: Position,
94 *,
95 inclusive_end: bool = False,
96) -> bool:
97 cursor_line = cursor_position.line
98 start_pos = te_range.start_pos
99 end_pos = te_range.end_pos
100 if cursor_line < start_pos.line_position or cursor_line > end_pos.line_position:
101 return False
103 if start_pos.line_position == end_pos.line_position:
104 start_col = start_pos.cursor_position
105 cursor_col = cursor_position.character
106 end_col = end_pos.cursor_position
107 if inclusive_end: 107 ↛ 109line 107 didn't jump to line 109 because the condition on line 107 was always true
108 return start_col <= cursor_col <= end_col
109 return start_col <= cursor_col < end_col
111 if cursor_line == end_pos.line_position:
112 return cursor_position.character < end_pos.cursor_position
114 return (
115 cursor_line > start_pos.line_position
116 or start_pos.cursor_position <= cursor_position.character
117 )
def _field_at_position(
    stanza: Deb822ParagraphElement,
    stanza_metadata: S,
    stanza_range: TERange,
    position: Position,
) -> Tuple[Optional[Deb822KeyValuePairElement], Optional[F], str, bool]:
    """Locate the field (and value token, if any) under *position* in *stanza*.

    :param stanza: The stanza (paragraph) to search.
    :param stanza_metadata: Metadata used to look up known-field information.
    :param stanza_range: The stanza's range within the file; part ranges are
        accumulated relative to its start.
    :param position: The (server-side) cursor position.
    :return: ``(kvpair, known_field, matched_value, in_value)`` where
        ``matched_value`` is the interpreted value token under the cursor
        ("" when none) and ``in_value`` is True when the cursor is inside the
        value portion of the field.  Returns ``(None, None, "", False)`` when
        no key-value pair contains the position.
    """
    # Start with an empty range at the stanza start; each part's size is then
    # chained onto the end of the previous part's range.
    te_range = TERange(stanza_range.start_pos, stanza_range.start_pos)
    for token_or_element in stanza.iter_parts():
        te_range = token_or_element.size().relative_to(te_range.end_pos)
        if not in_range(te_range, position):
            continue
        if isinstance(token_or_element, Deb822KeyValuePairElement):
            value_range = token_or_element.value_element.range_in_parent().relative_to(
                te_range.start_pos
            )
            known_field = stanza_metadata.get(token_or_element.field_name)
            in_value = in_range(value_range, position)
            # Only known fields provide an interpreter for their value class.
            interpreter = (
                known_field.field_value_class.interpreter()
                if known_field is not None
                else None
            )
            matched_value = ""
            if in_value and interpreter is not None:
                # Walk the interpreted value tokens to find the one under the
                # cursor (inclusive end, so a cursor just after the token
                # still matches).
                interpreted = token_or_element.interpret_as(interpreter)
                for value_ref in interpreted.iter_value_references():
                    value_token_range = (
                        value_ref.locatable.range_in_parent().relative_to(
                            value_range.start_pos
                        )
                    )
                    if in_range(value_token_range, position, inclusive_end=True):
                        matched_value = value_ref.value
                        break
            return token_or_element, known_field, matched_value, in_value
    return None, None, "", False
def _allow_stanza_continuation(
    token_or_element: TokenOrElement,
    is_completion: bool,
) -> bool:
    """Decide whether *token_or_element* may still be treated as part of an
    adjacent stanza for completion purposes.

    Only relevant during completion; error and comment parts always allow
    continuation, whitespace does so only when it contains at most one
    newline (i.e. it is not a stanza separator).
    """
    if not is_completion:
        return False
    if token_or_element.is_error or token_or_element.is_comment:
        return True
    if not token_or_element.is_whitespace:
        return False
    return token_or_element.convert_to_text().count("\n") < 2
def _at_cursor(
    deb822_file: Deb822FileElement,
    file_metadata: Deb822FileMetadata[S, F],
    doc: "TextDocument",
    lines: List[str],
    client_position: Position,
    is_completion: bool = False,
) -> Tuple[
    Position,
    Optional[str],
    str,
    bool,
    Optional[S],
    Optional[F],
    Iterable[Deb822ParagraphElement],
]:
    """Resolve the cursor position to its deb822 context.

    :param deb822_file: Parsed deb822 file to scan.
    :param file_metadata: Metadata used to classify stanzas.
    :param doc: The text document (used for position conversion and the word
        under the cursor).
    :param lines: The document lines (client-side representation).
    :param client_position: Cursor position in client units.
    :param is_completion: When True, whitespace/comment/error parts adjacent
        to a stanza are treated as a continuation of it, and a stanza
        classification is guessed even when the cursor is not in a stanza.
    :return: A 7-tuple of (server position, matched field name or None,
        word/value under the cursor, whether the cursor is in a field value,
        stanza metadata or None, known-field metadata or None, an iterable of
        the stanza(s) relevant at the cursor).
    """
    server_position = doc.position_codec.position_from_client_units(
        lines,
        client_position,
    )
    # Empty range at the file start; each part's size is chained onto it.
    te_range = TERange(
        START_POSITION,
        START_POSITION,
    )
    paragraph_no = -1
    previous_stanza: Optional[Deb822ParagraphElement] = None
    next_stanza: Optional[Deb822ParagraphElement] = None
    current_word = doc.word_at_position(client_position)
    in_value: bool = False
    file_iter = iter(deb822_file.iter_parts())
    matched_token: Optional[TokenOrElement] = None
    matched_field: Optional[str] = None
    stanza_metadata: Optional[S] = None
    known_field: Optional[F] = None

    for token_or_element in file_iter:
        te_range = token_or_element.size().relative_to(te_range.end_pos)
        if isinstance(token_or_element, Deb822ParagraphElement):
            previous_stanza = token_or_element
            paragraph_no += 1
        elif not _allow_stanza_continuation(token_or_element, is_completion):
            # A hard separator: the previous stanza is no longer adjacent.
            previous_stanza = None
        if not in_range(te_range, server_position):
            continue
        matched_token = token_or_element
        if isinstance(token_or_element, Deb822ParagraphElement):
            stanza_metadata = file_metadata.guess_stanza_classification_by_idx(
                paragraph_no
            )
            kvpair, known_field, current_word, in_value = _field_at_position(
                token_or_element,
                stanza_metadata,
                te_range,
                server_position,
            )
            if kvpair is not None:
                matched_field = kvpair.field_name
        # First part containing the cursor decides the context; stop here.
        break

    if matched_token is not None and _allow_stanza_continuation(
        matched_token,
        is_completion,
    ):
        # Cursor sits on a "soft" part between stanzas; the next part (if it
        # is a stanza) is also relevant for completion.
        next_te = next(file_iter, None)
        if isinstance(next_te, Deb822ParagraphElement):
            next_stanza = next_te

    stanza_parts = (p for p in (previous_stanza, next_stanza) if p is not None)

    if stanza_metadata is None and is_completion:
        # Completion needs stanza metadata even outside any stanza; guess
        # from the (clamped) stanza index.
        if paragraph_no < 0:
            paragraph_no = 0
        stanza_metadata = file_metadata.guess_stanza_classification_by_idx(paragraph_no)

    return (
        server_position,
        matched_field,
        current_word,
        in_value,
        stanza_metadata,
        known_field,
        stanza_parts,
    )
def deb822_completer(
    ls: "DebputyLanguageServer",
    params: CompletionParams,
    file_metadata: Deb822FileMetadata[Any, Any],
) -> Optional[Union[CompletionList, Sequence[CompletionItem]]]:
    """LSP completion handler for deb822 files.

    Completes field values when the cursor is inside a known field's value and
    field names otherwise.  Returns None when *file_metadata* does not apply
    to the document or when the cursor is on a comment line.
    """
    doc = ls.workspace.get_text_document(params.text_document.uri)
    lines = doc.lines
    lint_state = ls.lint_state(doc)
    deb822_file = lint_state.parsed_deb822_file_content
    if not file_metadata.file_metadata_applies_to_file(deb822_file):
        return None

    (
        server_pos,
        current_field,
        word_at_position,
        in_value,
        stanza_metadata,
        known_field,
        matched_stanzas,
    ) = _at_cursor(
        deb822_file,
        file_metadata,
        doc,
        lines,
        params.position,
        is_completion=True,
    )

    # No completion inside comments.
    if lines[server_pos.line].startswith("#"):
        return

    items: Optional[Sequence[CompletionItem]]
    markdown_kind = ls.completion_item_document_markup(
        MarkupKind.Markdown, MarkupKind.PlainText
    )
    if in_value:
        _info(f"Completion for field value {current_field} -- {word_at_position}")
        if known_field is None:
            # Unknown field: we have no value candidates to offer.
            return None
        value_being_completed = word_at_position
        items = known_field.value_options_for_completer(
            lint_state,
            list(matched_stanzas),
            value_being_completed,
            markdown_kind,
        )
    else:
        _info("Completing field name")
        # _at_cursor always guesses stanza metadata when is_completion=True.
        assert stanza_metadata is not None
        items = _complete_field_name(
            lint_state,
            stanza_metadata,
            matched_stanzas,
            markdown_kind,
        )
    return items
def deb822_hover(
    ls: "DebputyLanguageServer",
    params: HoverParams,
    file_metadata: Deb822FileMetadata[S, F],
    *,
    custom_handler: Optional[
        Callable[
            [
                "DebputyLanguageServer",
                Position,
                Optional[str],
                str,
                Optional[F],
                bool,
                "TextDocument",
                List[str],
            ],
            Optional[Hover],
        ]
    ] = None,
) -> Optional[Hover]:
    """LSP hover handler for deb822 files.

    Shows documentation for the known value under the cursor (when inside a
    value) or for the field itself.  A *custom_handler* may short-circuit the
    lookup: if it returns a ``Hover`` it is used as-is; if it returns a string
    that becomes the hover text; if it returns None the default lookup runs.
    Returns None for comment lines, unknown fields, or missing documentation.
    """
    doc = ls.workspace.get_text_document(params.text_document.uri)
    deb822_file = ls.lint_state(doc).parsed_deb822_file_content
    if not file_metadata.file_metadata_applies_to_file(deb822_file):
        return None
    lines = doc.lines
    (
        server_pos,
        current_field,
        word_at_position,
        in_value,
        _,
        known_field,
        _,
    ) = _at_cursor(
        deb822_file,
        file_metadata,
        doc,
        lines,
        params.position,
    )

    # No hover inside comments.
    if lines[server_pos.line].startswith("#"):
        return

    hover_text = None
    if custom_handler is not None:
        res = custom_handler(
            ls,
            server_pos,
            current_field,
            word_at_position,
            known_field,
            in_value,
            doc,
            lines,
        )
        if isinstance(res, Hover):
            return res
        hover_text = res

    if hover_text is None:
        if current_field is None:
            _info("No hover information as we cannot determine which field it is for")
            return None
        if known_field is None:
            return None
        if in_value:
            # Hover over a value: only known values have documentation.
            if not known_field.known_values:
                return None
            keyword = known_field.known_values.get(word_at_position)
            if keyword is None:
                return None
            hover_text = keyword.long_description_translated(ls)
            if hover_text is not None:
                header = "`{VALUE}` (Field: {FIELD_NAME})".format(
                    VALUE=keyword.value,
                    FIELD_NAME=known_field.name,
                )
                hover_text = f"# {header}\n\n{hover_text}"
        else:
            # Hover over the field name itself.
            hover_text = known_field.long_description_translated(ls)
            if hover_text is None:
                hover_text = (
                    f"No documentation is available for the field {current_field}."
                )
            hover_text = f"# {known_field.name}\n\n{hover_text}"

    if hover_text is None:
        return None
    return Hover(
        contents=MarkupContent(
            kind=ls.hover_markup_format(MarkupKind.Markdown, MarkupKind.PlainText),
            value=hover_text,
        )
    )
def deb822_token_iter(
    tokens: Iterable["Deb822Token"],
) -> Iterator[Tuple["Deb822Token", int, int, int, int]]:
    """Iterate *tokens* while tracking their text positions.

    Yields ``(token, start_line, start_col, end_line, end_col)`` with 0-based
    lines/columns; the end column is exclusive (the column just past the last
    character on the end line).

    Fix: the end column for a token that contains a newline but does not end
    with one was off by one (it subtracted 2 from the distance past the last
    "\\n", whereas the newline-free branch advances by the full text length,
    i.e. an exclusive end column).
    """
    line_no = 0
    line_offset = 0

    for token in tokens:
        start_line = line_no
        start_line_offset = line_offset

        newlines = token.text.count("\n")
        line_no += newlines
        text_len = len(token.text)
        if newlines:
            if token.text.endswith("\n"):
                line_offset = 0
            else:
                # Exclusive end column = number of characters after the last
                # newline (-1 removes the newline itself from the count).
                line_offset = text_len - token.text.rindex("\n") - 1
        else:
            line_offset += text_len

        yield token, start_line, start_line_offset, line_no, line_offset
def deb822_folding_ranges(
    ls: "DebputyLanguageServer",
    params: FoldingRangeParams,
    file_metadata: Deb822FileMetadata[Any, Any],
) -> Optional[Sequence[FoldingRange]]:
    """LSP folding-range handler: fold each run of consecutive comment tokens.

    Fix: the start of the fold was previously reset to -1 *before* the
    ``FoldingRange`` was constructed, so every comment fold started at line
    -1.  The range is now built from the remembered start line first.
    """
    doc = ls.workspace.get_text_document(params.text_document.uri)
    deb822_file = ls.lint_state(doc).parsed_deb822_file_content
    if not file_metadata.file_metadata_applies_to_file(deb822_file):
        return None
    comment_start = -1  # -1 => not currently inside a comment run
    folding_ranges = []
    for (
        token,
        start_line,
        start_offset,
        end_line,
        end_offset,
    ) in deb822_token_iter(deb822_file.iter_tokens()):
        if token.is_comment:
            if comment_start < 0:
                # First comment token of a new run; remember where it began.
                comment_start = start_line
        elif comment_start > -1:
            # The comment run ended; emit its folding range before resetting.
            folding_range = FoldingRange(
                comment_start,
                end_line,
                kind=FoldingRangeKind.Comment,
            )
            folding_ranges.append(folding_range)
            comment_start = -1

    return folding_ranges
class Deb822SemanticTokensState(SemanticTokensState):
    """Semantic-token emission state for deb822 documents.

    Caches the numeric token codes for each semantic token type once, since
    they are looked up for every emitted token.
    """

    __slots__ = (
        "file_metadata",
        "keyword_token_code",
        "known_value_token_code",
        "comment_token_code",
        "substvars_token_code",
        "operator_token_code",
        "relation_restriction_token_code",
        "package_token_code",
        # NOTE(review): declared but never assigned in this constructor;
        # presumably set elsewhere (base class or callers) — TODO confirm.
        "allow_overlapping_tokens",
    )

    def __init__(
        self,
        ls: "DebputyLanguageServer",
        doc: "TextDocument",
        lines: List[str],
        tokens: List[int],
        file_metadata: Deb822FileMetadata[Any, Any],
    ) -> None:
        super().__init__(ls, doc, lines, tokens)
        self.file_metadata = file_metadata
        self.keyword_token_code = SEMANTIC_TOKEN_TYPES_IDS[SemanticTokenTypes.Keyword]
        self.known_value_token_code = SEMANTIC_TOKEN_TYPES_IDS[
            SemanticTokenTypes.EnumMember
        ]
        # Consistency fix: previously looked up via
        # `SemanticTokenTypes.Comment.value`, which only worked because the
        # enum is str-valued; use the enum member like every other lookup.
        self.comment_token_code = SEMANTIC_TOKEN_TYPES_IDS[SemanticTokenTypes.Comment]
        self.substvars_token_code = SEMANTIC_TOKEN_TYPES_IDS[SemanticTokenTypes.Macro]
        self.operator_token_code = SEMANTIC_TOKEN_TYPES_IDS[SemanticTokenTypes.Operator]
        self.relation_restriction_token_code = SEMANTIC_TOKEN_TYPES_IDS[
            SemanticTokenTypes.TypeParameter
        ]
        self.package_token_code = SEMANTIC_TOKEN_TYPES_IDS[SemanticTokenTypes.Variable]
def _emit_tokens_for_comment_element(
    sem_token_state: Deb822SemanticTokensState,
    comment_element: Deb822CommentElement,
    comment_start_line: int,
    comment_token_code: int,
) -> None:
    """Emit one comment semantic token per line of *comment_element*.

    Each comment line is tokenized from column 0 up to its length with
    trailing whitespace stripped.
    """
    line_no = comment_start_line
    for part in comment_element.iter_parts():
        assert part.is_comment
        assert isinstance(part, Deb822Token)
        sem_token_state.emit_token(
            Position(line_no, 0),
            len(part.text.rstrip()),
            comment_token_code,
        )
        line_no += 1
async def scan_for_syntax_errors_and_token_level_diagnostics(
    deb822_file: Deb822FileElement,
    lint_state: LintState,
) -> int:
    """Emit diagnostics for parse errors and (optionally) comment spelling.

    :param deb822_file: The parsed deb822 file to scan token by token.
    :param lint_state: Receives the diagnostics; also provides the
        spellchecker and configuration.
    :return: The first line (0-based) containing a syntax-error token, or
        ``len(lines) + 1`` when there is no error token.
    """
    first_error = len(lint_state.lines) + 1
    spell_checker = lint_state.spellchecker()
    # slow_iter periodically yields control so long files do not block the
    # event loop.
    async for (
        token,
        start_line,
        start_offset,
        end_line,
        end_offset,
    ) in lint_state.slow_iter(deb822_token_iter(deb822_file.iter_tokens())):
        if token.is_error:
            first_error = min(first_error, start_line)
            token_range = TERange(
                TEPosition(
                    start_line,
                    start_offset,
                ),
                TEPosition(
                    end_line,
                    end_offset,
                ),
            )
            lint_state.emit_diagnostic(
                token_range,
                "Syntax error",
                "error",
                "debputy",
            )
        elif token.is_comment:
            # Comment spellchecking is opt-in via configuration.
            if not lint_state.debputy_config.config_value(DCO_SPELLCHECK_COMMENTS):
                continue
            for word, col_pos, end_col_pos in spell_checker.iter_words(token.text):
                corrections = spell_checker.provide_corrections_for(word)
                if not corrections:
                    continue
                # Comment tokens are single-line, so both ends of the word
                # range sit on start_line.
                word_range = TERange.between(
                    TEPosition(
                        start_line,
                        col_pos,
                    ),
                    TEPosition(
                        start_line,
                        end_col_pos,
                    ),
                )
                lint_state.emit_diagnostic(
                    word_range,
                    f'Spelling "{word}"',
                    "spelling",
                    "debputy",
                    quickfixes=[propose_correct_text_quick_fix(c) for c in corrections],
                    enable_non_interactive_auto_fix=False,
                )
    return first_error
595def _emit_relation_token(
596 sem_token_state: Deb822SemanticTokensState,
597 token_code: Optional[int],
598 m: re.Match[str],
599 group_name: str,
600 value_range_te: TERange,
601) -> None:
602 token_value = m.group(group_name)
603 token_start = m.start(group_name)
604 if token_value is None or token_start is None:
605 return
606 pos = TEPosition(
607 value_range_te.start_pos.line_position,
608 value_range_te.start_pos.cursor_position + token_start,
609 )
610 end_pos = TEPosition(
611 pos.line_position,
612 pos.cursor_position + len(token_value),
613 )
615 _process_value_with_substvars(
616 sem_token_state,
617 token_value,
618 TERange.between(pos, end_pos),
619 token_code,
620 )
async def _deb822_relationship_field_semantic_tokens_full(
    sem_token_state: Deb822SemanticTokensState,
    interpretation: Interpretation[Deb822ParsedTokenList[Any, Any]],
    kvpair: Deb822KeyValuePairElement,
    value_element_pos: TEPosition,
) -> None:
    """Emit semantic tokens for a relationship field value (e.g. Depends).

    Each non-whitespace, non-separator part is either a comment (emitted
    whole) or a dependency clause, which is split into package name,
    operator, version and restriction tokens via ``_DEP_RELATION_CLAUSE``.
    """
    doc = sem_token_state.doc
    parts = kvpair.interpret_as(interpretation).iter_parts()
    # Hoist the token-code lookups out of the loop.
    comment_token_code = sem_token_state.comment_token_code
    operator_token_code = sem_token_state.operator_token_code
    relation_restriction_token_code = sem_token_state.relation_restriction_token_code
    package_token_code = sem_token_state.package_token_code

    for te in parts:
        if te.is_whitespace:
            continue
        if te.is_separator:
            continue

        value_range_in_parent_te = te.range_in_parent()
        value_range_te = value_range_in_parent_te.relative_to(value_element_pos)
        value = te.convert_to_text()
        if te.is_comment:
            token_type = comment_token_code
            value = value.rstrip()
            value_len = doc.position_codec.client_num_units(value)
            sem_token_state.emit_token(
                te_position_to_lsp(value_range_te.start_pos),
                value_len,
                token_type,
            )
        else:
            # NOTE(review): `m` is None when the clause does not match the
            # regex; `_emit_relation_token` must tolerate that — confirm.
            m = _DEP_RELATION_CLAUSE.fullmatch(value)
            _emit_relation_token(
                sem_token_state,
                package_token_code,
                m,
                "name_arch_qual",
                value_range_te,
            )

            _emit_relation_token(
                sem_token_state,
                operator_token_code,
                m,
                "operator",
                value_range_te,
            )
            # Version text itself gets no base token; only substvars inside
            # it are highlighted.
            _emit_relation_token(
                sem_token_state,
                None,
                m,
                "version",
                value_range_te,
            )
            _emit_relation_token(
                sem_token_state,
                relation_restriction_token_code,
                m,
                "arch_restriction",
                value_range_te,
            )
            _emit_relation_token(
                sem_token_state,
                relation_restriction_token_code,
                m,
                "build_profile_restriction",
                value_range_te,
            )
async def _deb822_paragraph_semantic_tokens_full(
    ls: "DebputyLanguageServer",
    sem_token_state: Deb822SemanticTokensState,
    stanza: Deb822ParagraphElement,
    stanza_range_in_file: "TERange",
    stanza_idx: int,
) -> None:
    """Emit semantic tokens for every key-value pair in one stanza.

    Field names become keyword tokens; comments become comment tokens; values
    are tokenized per known-field metadata (relationship fields, known values
    and substvars each get their own treatment).  Fields whose value is
    spellchecked are skipped entirely.
    """
    doc = sem_token_state.doc
    keyword_token_code = sem_token_state.keyword_token_code
    known_value_token_code = sem_token_state.known_value_token_code
    comment_token_code = sem_token_state.comment_token_code

    stanza_position = stanza_range_in_file.start_pos
    stanza_metadata = sem_token_state.file_metadata.classify_stanza(
        stanza,
        stanza_idx=stanza_idx,
    )
    # slow_iter periodically yields control so large stanzas do not block the
    # event loop.
    async for kvpair_range, kvpair in ls.slow_iter(
        with_range_in_continuous_parts(
            stanza.iter_parts(),
            start_relative_to=stanza_position,
        ),
        yield_every=25,
    ):
        if not isinstance(kvpair, Deb822KeyValuePairElement):
            continue
        kvpair_position = kvpair_range.start_pos
        field_start = kvpair.field_token.position_in_parent().relative_to(
            kvpair_position
        )
        comment = kvpair.comment_element
        if comment:
            # The comment sits immediately above the field, one line per part.
            comment_start_line = field_start.line_position - len(comment)
            _emit_tokens_for_comment_element(
                sem_token_state,
                comment,
                comment_start_line,
                comment_token_code,
            )

        field_size = doc.position_codec.client_num_units(kvpair.field_name)

        sem_token_state.emit_token(
            te_position_to_lsp(field_start),
            field_size,
            keyword_token_code,
        )

        known_field: Optional[Deb822KnownField] = stanza_metadata.get(kvpair.field_name)
        value_element_pos = kvpair.value_element.position_in_parent().relative_to(
            kvpair_position
        )
        if known_field is not None:
            if known_field.spellcheck_value:
                # Spellchecked values are handled by diagnostics, not tokens.
                continue
            interpretation = known_field.field_value_class.interpreter()
            if (
                getattr(known_field, "is_relationship_field", False)
                and interpretation is not None
            ):
                # Relationship fields (Depends etc.) get dedicated handling.
                await _deb822_relationship_field_semantic_tokens_full(
                    sem_token_state,
                    interpretation,
                    kvpair,
                    value_element_pos,
                )
                continue
            known_values: Container[str] = known_field.known_values or frozenset()
            field_disallows_substvars = (
                known_field.is_substvars_disabled_even_if_allowed_by_stanza
            )
            allow_substvars = (
                stanza_metadata.is_substvars_allowed_in_stanza
                and not field_disallows_substvars
            )
        else:
            known_values = frozenset()
            interpretation = None
            allow_substvars = stanza_metadata.is_substvars_allowed_in_stanza

        if interpretation is None:
            # No interpreter: only comments inside the value can be tokenized.
            for value_line in kvpair.value_element.value_lines:
                comment_element = value_line.comment_element
                if comment_element:
                    assert comment_element.position_in_parent().line_position == 0
                    comment_start_line = (
                        value_line.position_in_parent()
                        .relative_to(value_element_pos)
                        .line_position
                    )
                    _emit_tokens_for_comment_element(
                        sem_token_state,
                        comment_element,
                        comment_start_line,
                        comment_token_code,
                    )
                    continue
        else:
            parts = kvpair.interpret_as(interpretation).iter_parts()
            for te in parts:
                if te.is_whitespace:
                    continue
                if te.is_separator:
                    continue
                value_range_in_parent_te = te.range_in_parent()
                value_range_te = value_range_in_parent_te.relative_to(value_element_pos)
                value = te.convert_to_text()
                if te.is_comment:
                    token_type = comment_token_code
                    value = value.rstrip()
                elif value in known_values:
                    token_type = known_value_token_code
                elif allow_substvars and "${" in value:
                    # Values with substvars are split and emitted separately.
                    _process_value_with_substvars(
                        sem_token_state,
                        value,
                        value_range_te,
                        None,
                    )
                    continue
                else:
                    # Plain, unknown value: no token.
                    continue
                value_len = doc.position_codec.client_num_units(value)
                sem_token_state.emit_token(
                    te_position_to_lsp(value_range_te.start_pos),
                    value_len,
                    token_type,
                )
824def _split_into_substvars(
825 value: str,
826 base_token_type: Optional[int],
827 substvar_token_type: int,
828) -> Iterable[Tuple[str, Optional[int]]]:
830 i = 0
831 next_search = i
832 full_value_len = len(value)
833 while i < full_value_len:
834 try:
835 subst_var_start = value.index("${", next_search)
836 subst_var_end = value.index("}", subst_var_start + 2)
837 except ValueError:
838 token = value[i:full_value_len]
839 if token: 839 ↛ 841line 839 didn't jump to line 841 because the condition on line 839 was always true
840 yield token, base_token_type
841 return
843 subst_var_end += 1
844 subst_var = value[subst_var_start:subst_var_end]
845 if subst_var != "${}" and not SUBSTVAR_RE.match(subst_var): 845 ↛ 846line 845 didn't jump to line 846 because the condition on line 845 was never true
846 subst_var = None
848 if subst_var is None: 848 ↛ 849line 848 didn't jump to line 849 because the condition on line 848 was never true
849 next_search = subst_var_end
850 continue
852 token = value[i:subst_var_start]
853 if token: 853 ↛ 854line 853 didn't jump to line 854 because the condition on line 853 was never true
854 yield token, base_token_type
855 yield subst_var, substvar_token_type
856 i = subst_var_end
857 next_search = i
def _process_value_with_substvars(
    sem_token_state: Deb822SemanticTokensState,
    value: str,
    value_range_te: "TERange",
    base_token_type: Optional[int],
) -> None:
    """Emit semantic tokens for *value*, highlighting embedded substvars.

    Plain segments use *base_token_type* (a None base suppresses them);
    substvar segments always use the cached substvar token code.  The value
    is assumed to sit on a single line starting at *value_range_te*.
    """
    codec = sem_token_state.doc.position_codec

    # TODO: Support overlapping tokens if the editor does.

    line = value_range_te.start_pos.line_position
    column = value_range_te.start_pos.cursor_position
    for segment, segment_type in _split_into_substvars(
        value,
        base_token_type,
        sem_token_state.substvars_token_code,
    ):
        if segment_type is not None:
            sem_token_state.emit_token(
                types.Position(line, column),
                codec.client_num_units(segment),
                segment_type,
            )
        # Advance by the raw segment length even when nothing was emitted.
        column += len(segment)
def deb822_format_file(
    lint_state: LintState,
    file_metadata: Deb822FileMetadata[Any, Any],
) -> Optional[Sequence[TextEdit]]:
    """Produce formatting edits for a deb822 document.

    Without an effective style preference, only trailing end-of-line
    whitespace is trimmed; otherwise the file is reformatted via the
    metadata's reformatter.  Returns None when the metadata does not apply or
    the parse result is unavailable.

    Fix: ``parsed_deb822_file_content`` was previously read a second time
    after the preference check; the redundant re-read is removed.
    """
    deb822_file = lint_state.parsed_deb822_file_content
    if not file_metadata.file_metadata_applies_to_file(deb822_file):
        return None
    effective_preference = lint_state.effective_preference
    if effective_preference is None:
        # No style preference: limit ourselves to whitespace trimming.
        return trim_end_of_line_whitespace(lint_state.position_codec, lint_state.lines)
    formatter = effective_preference.deb822_formatter()
    lines = lint_state.lines
    if deb822_file is None:
        _warn("The deb822 result missing failed!?")
        return None

    return list(
        file_metadata.reformat(
            effective_preference,
            deb822_file,
            formatter,
            lint_state.content,
            lint_state.position_codec,
            lines,
        )
    )
async def deb822_semantic_tokens_full(
    ls: "DebputyLanguageServer",
    request: SemanticTokensParams,
    file_metadata: Deb822FileMetadata[Any, Any],
) -> Optional[SemanticTokens]:
    """LSP "semantic tokens (full)" handler for deb822 files.

    Walks the parsed file, emitting comment tokens for top-level comments and
    delegating each stanza to ``_deb822_paragraph_semantic_tokens_full``.
    Returns None when the metadata does not apply, the parse result is
    missing, or no tokens were produced.
    """
    doc = ls.workspace.get_text_document(request.text_document.uri)
    deb822_file = ls.lint_state(doc).parsed_deb822_file_content
    if not file_metadata.file_metadata_applies_to_file(deb822_file):
        return None
    position_codec = doc.position_codec
    lines = doc.lines
    if deb822_file is None:
        _warn("The deb822 result missing failed!?")
        ls.show_message_log(
            "Internal error; could not get deb822 content!?", MessageType.Warning
        )
        return None

    # The state object appends encoded token data into `tokens` in place.
    tokens: List[int] = []
    sem_token_state = Deb822SemanticTokensState(
        ls,
        doc,
        lines,
        tokens,
        file_metadata,
    )

    comment_token_code = sem_token_state.comment_token_code

    stanza_idx = 0

    # slow_iter periodically yields control so large files do not block the
    # event loop.
    async for part_range, part in ls.slow_iter(
        with_range_in_continuous_parts(deb822_file.iter_parts()), yield_every=20
    ):
        if part.is_comment:
            pos = part_range.start_pos
            sem_token_state.emit_token(
                te_position_to_lsp(pos),
                # Avoid trailing newline
                position_codec.client_num_units(part.convert_to_text().rstrip()),
                comment_token_code,
            )
        elif isinstance(part, Deb822ParagraphElement):
            await _deb822_paragraph_semantic_tokens_full(
                ls,
                sem_token_state,
                part,
                part_range,
                stanza_idx,
            )
            stanza_idx += 1
    if not tokens:
        return None
    return SemanticTokens(tokens)
def _complete_field_name(
    lint_state: LintState,
    stanza_metadata: StanzaMetadata[Any],
    matched_stanzas: Iterable[Deb822ParagraphElement],
    markdown_kind: MarkupKind,
) -> Sequence[CompletionItem]:
    """Build completion items for field names not yet present in the
    matched stanza(s).

    Field names are compared case-insensitively after normalization so that
    existing fields are never offered again.
    """
    stanzas = list(matched_stanzas)
    # The typing from python3-debian is not entirely optimal here. The iter
    # always returns a `str`, but the provided type is `ParagraphKey`
    # (because `__getitem__` supports those) and that is not exclusively a
    # `str`.  So, this cast for now.
    seen_fields = {
        stanza_metadata.normalize_field_name(field_name.lower())
        for stanza in stanzas
        for field_name in cast("Iterable[str]", stanza)
    }
    completions = []
    for candidate_name, candidate in stanza_metadata.items():
        if stanza_metadata.normalize_field_name(candidate_name.lower()) in seen_fields:
            continue
        completion = candidate.complete_field(lint_state, stanzas, markdown_kind)
        if completion is not None:
            completions.append(completion)
    return completions