"""Routines for processing Windows .rc files."""

# Copyright © 2023, the Celestia Development Team
# Original version by Andrew Tribick, December 2023
#
# Functionality based on extract_resource_strings.pl
# Original version by Christophe Teyssier <chris@teyssier.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.

from __future__ import annotations

from enum import auto, Enum
import re
from typing import Generator, Optional, NamedTuple, TextIO, TYPE_CHECKING, Union

from .utilities import Location, Message, unquote

if TYPE_CHECKING:
    import os

class TokenType(Enum):
    """Token types"""

    WHITESPACE = auto()
    NEWLINE = auto()
    COMMENT = auto()
    INSTRUCTION = auto()
    OPEN = auto()
    CLOSE = auto()
    OPERATOR = auto()
    NUMBER = auto()
    KEYWORD = auto()
    QUOTED = auto()


class Token(NamedTuple):
    """Token info"""

    type: TokenType
    value: str
    line: int


_MATCHERS = [
    (re.compile(r"[ \t]+"), TokenType.WHITESPACE),
    (re.compile(r"[A-Za-z_][0-9A-Za-z_]*"), TokenType.KEYWORD),
    (re.compile(r"[1-9][0-9]*|0(?:[xX][0-9A-Fa-f]+|[0-7]*)"), TokenType.NUMBER),
]

_OPMATCH = {
    "(": TokenType.OPEN,
    "[": TokenType.OPEN,
    "{": TokenType.OPEN,
    "}": TokenType.CLOSE,
    "]": TokenType.CLOSE,
    ")": TokenType.CLOSE,
}

class RCTokenizer:
    """Simple RC file tokenizer, no preprocessor."""

    source: TextIO
    data: str
    line: int
    pos: int

    def __init__(self, source: TextIO) -> None:
        self.source = source
        self.data = source.readline().rstrip("\n")
        self.line = 1
        self.pos = 0

    def __iter__(self) -> RCTokenizer:
        return self

    def __next__(self) -> Token:
        """Gets a token from the file"""
        if self.pos >= len(self.data):
            self.data = self.source.readline()
            if not self.data:
                raise StopIteration
            self.data = self.data.rstrip("\n")
            self.line += 1
            self.pos = 0
            return Token(TokenType.NEWLINE, "\n", self.line - 1)

        ch = self.data[self.pos]
        if ch == "/":
            token = self._handle_slash()
        elif ch == "#":
            token = self._handle_instruction()
        elif ch == '"':
            token = self._handle_quoted()
        else:
            for matcher, token_type in _MATCHERS:
                match = matcher.match(self.data[self.pos :])
                if match:
                    self.pos += match.end()
                    token = Token(token_type, match.group(0), self.line)
                    break
            else:
                self.pos += 1
                token = Token(_OPMATCH.get(ch, TokenType.OPERATOR), ch, self.line)

        return token

    def _handle_slash(self) -> Token:
        self.pos += 1
        if self.pos == len(self.data):
            return Token(TokenType.OPERATOR, "/", self.line)
        ch = self.data[self.pos]
        if ch == "/":
            # supply prefix as we are positioned on second / of //
            return self._read_to_eol(TokenType.COMMENT, "/")
        if ch == "*":
            return self._handle_block_comment()
        return Token(TokenType.OPERATOR, "/", self.line)

    def _handle_instruction(self) -> Token:
        return self._read_to_eol(TokenType.INSTRUCTION, "")

    def _read_to_eol(self, token_type: TokenType, prefix: str) -> Token:
        token = prefix + self.data[self.pos :]
        self.pos = len(self.data)
        return Token(token_type, token, self.line)

    def _handle_block_comment(self) -> Token:
        token = "/*"  # nosec B105
        start_line = self.line
        self.pos += 1
        new_pos = self.data.find("*/", self.pos)
        while new_pos < 0:
            token += self.data[self.pos :]
            self.data = self.source.readline()
            if not self.data:
                raise ValueError("Unclosed block comment")
            self.line += 1
            self.pos = 0
            new_pos = self.data.find("*/")

        token += self.data[self.pos : new_pos]
        self.pos = new_pos + 2
        return Token(TokenType.COMMENT, token, start_line)

    def _handle_quoted(self) -> Token:
        new_pos = self.pos
        is_escape = False
        while True:
            new_pos += 1
            if new_pos == len(self.data):
                raise ValueError("Unclosed quotes")
            ch = self.data[new_pos]
            if is_escape:
                is_escape = False
            elif ch == "\\":
                is_escape = True
            elif ch == '"':
                break
        new_pos += 1
        token = self.data[self.pos : new_pos]
        self.pos = new_pos
        return Token(TokenType.QUOTED, token, self.line)


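# The statements below introduce translatable quoted strings in resource
# scripts. A hypothetical example (identifiers invented for illustration):
#
#     CAPTION "About Celestia"
#     MENUITEM "&Open...", ID_FILE_OPEN
#
# For the CAPTION line, RCTokenizer yields approximately:
#     Token(TokenType.KEYWORD, "CAPTION", 1)
#     Token(TokenType.WHITESPACE, " ", 1)
#     Token(TokenType.QUOTED, '"About Celestia"', 1)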
RC_KEYWORDS = {
    "POPUP",
    "MENUITEM",
    "DEFPUSHBUTTON",
    "CAPTION",
    "PUSHBUTTON",
    "RTEXT",
    "CTEXT",
    "LTEXT",
    "GROUPBOX",
    "AUTOCHECKBOX",
    "AUTORADIOBUTTON",
}

EXTRACT_SKIP_TYPES = {
    TokenType.NEWLINE,
    TokenType.WHITESPACE,
    TokenType.COMMENT,
    TokenType.INSTRUCTION,
}

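# Shape of the argument list expected after an NC_ keyword, i.e.
# NC_("context", "message"): open parenthesis, quoted context, comma,
# quoted message, close parenthesis. A value of None accepts any token
# of the given type.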
NC_TOKENS = [
    (TokenType.OPEN, "("),
    (TokenType.QUOTED, None),
    (TokenType.OPERATOR, ","),
    (TokenType.QUOTED, None),
    (TokenType.CLOSE, ")"),
]


class _RCExtractor:
    """Iterator extracting translatable messages from a tokenized .rc file."""

    filename: str
    tokenizer: RCTokenizer

    def __init__(
        self, filename: Union[str, os.PathLike], tokenizer: RCTokenizer
    ) -> None:
        self.filename = str(filename)
        self.tokenizer = tokenizer

    def __iter__(self) -> _RCExtractor:
        return self

    def __next__(self) -> tuple[Message, Location]:
        token = None
        retry = False
        while True:
            if retry:
                retry = False
            else:
                token = next(self.tokenizer)
            if token.type != TokenType.KEYWORD:
                continue
            if token.value in RC_KEYWORDS:
                result = self._try_get_simple()
                if result:
                    return result
                retry = True

    def _try_get_simple(self) -> Optional[tuple[Message, Location]]:
        while True:
            token = next(self.tokenizer)
            if token.type not in EXTRACT_SKIP_TYPES:
                break
        if token.type == TokenType.KEYWORD and token.value == "NC_":
            return self._try_get_nc()
        if token.type != TokenType.QUOTED:
            return None
        message = unquote(token.value)
        if message.strip():
            return Message(message, None), Location(self.filename, token.line, [])
        return None

    def _try_get_nc(self) -> Optional[tuple[Message, Location]]:
        location = Location(self.filename, self.tokenizer.line, [])
        buffer = []
        while len(buffer) < 5:
            token = next(self.tokenizer)
            if token.type in EXTRACT_SKIP_TYPES:
                continue
            expected_type, expected_value = NC_TOKENS[len(buffer)]
            if token.type != expected_type or (
                expected_value is not None and token.value != expected_value
            ):
                return None
            buffer.append(token)
        message = unquote(buffer[3].value)
        context = unquote(buffer[1].value)
        return Message(message, context), location


def extract_rc(
    filename: Union[str, os.PathLike]
) -> Generator[tuple[Message, Location], None, None]:
    """Extracts messages from a single Windows .rc file"""
    with open(filename, "rt", encoding="utf-16") as file:
        tokenizer = RCTokenizer(file)
        for result in _RCExtractor(filename, tokenizer):
            yield result
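

# Usage sketch (illustrative only; "celestia.rc" is a placeholder path):
#
#     for message, location in extract_rc("celestia.rc"):
#         ...  # handle each extracted (Message, Location) pair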