Coverage for src/debputy/lsp/lsp_test_support.py: 95%

31 statements  

« prev     ^ index     » next       coverage.py v7.8.2, created at 2025-09-07 09:27 +0000

1import dataclasses 

2from typing import List, Optional, FrozenSet, TYPE_CHECKING 

3 

4from debputy.lsp.lsp_features import SEMANTIC_TOKENS_LEGEND 

5from debputy.util import grouper 

6 

7if TYPE_CHECKING: 

8 import lsprotocol.types as types 

9else: 

10 import debputy.lsprotocol.types as types 

11 

12 

@dataclasses.dataclass(slots=True, frozen=True)
class ResolvedSemanticToken:
    """A semantic token decoded from the LSP delta-encoded wire format.

    Pairs the absolute document range of the token with the text it covers,
    the name of its semantic token type, and any modifier names (currently
    always empty, since no modifiers are defined).
    """

    # Absolute (line, column) range the token covers in the document.
    range: types.Range
    # The exact source text the token spans.
    value: str
    # Name of the semantic token type (resolved via the token legend).
    token_name: str
    # Names of semantic token modifiers; defaults to none.
    modifiers: FrozenSet[str] = frozenset()

19 

20 

def resolved_semantic_token(
    line_no: int,
    col_start: int,
    value: str,
    token_type: str,
    *,
    token_modifiers: FrozenSet[str] = frozenset(),
) -> ResolvedSemanticToken:
    """Build a ResolvedSemanticToken from absolute coordinates.

    The end position is derived from ``col_start`` plus the length of
    ``value``, so the resulting range spans exactly the token text on a
    single line.
    """
    start = types.Position(line_no, col_start)
    end = types.Position(line_no, col_start + len(value))
    return ResolvedSemanticToken(
        types.Range(start, end),
        value,
        token_type,
        token_modifiers,
    )

44 

45 

def resolve_semantic_tokens(
    lines: List[str],
    token_result: Optional[types.SemanticTokens],
) -> Optional[List[ResolvedSemanticToken]]:
    """Decode an LSP semantic tokens response into absolute tokens.

    The wire format encodes each token as five integers (line delta,
    column delta, length, token-type index, modifier bit set) relative to
    the previous token.  This walks the encoded data, resolves absolute
    positions, and slices the token text out of ``lines``.

    Returns ``None`` when ``token_result`` is ``None``.
    """
    if token_result is None:
        return None
    data = token_result.data
    # Each token occupies exactly five integers in the encoding.
    assert len(data) % 5 == 0
    token_types = SEMANTIC_TOKENS_LEGEND.token_types
    resolved: List[ResolvedSemanticToken] = []
    line_no = 0
    col = 0
    for line_delta, col_delta, length, type_code, modifier_codes in grouper(
        data, 5, incomplete="strict"
    ):
        if line_delta:
            # Moving to a new line resets the column base; the column
            # delta is then absolute for that line.
            col = 0
        line_no += line_delta
        col += col_delta
        assert (
            not modifier_codes
        ), "TODO: Modifiers not supported (no modifiers defined)"

        token_text = lines[line_no][col : col + length]

        resolved.append(
            resolved_semantic_token(
                line_no,
                col,
                token_text,
                token_types[type_code],
            ),
        )

    return resolved