
1# -*- coding: utf-8 -*- 

2#@+leo-ver=5-thin 

3#@+node:ekr.20160316095222.1: * @file ../commands/convertCommands.py 

4#@@first 

5"""Leo's file-conversion commands.""" 

6 

7import re 

8from typing import Any, Dict, List, Optional, Tuple 

9from leo.core import leoGlobals as g 

10from leo.core import leoBeautify 

11from leo.commands.baseCommands import BaseEditCommandsClass 

12 

13def cmd(name): 

14 """Command decorator for the ConvertCommandsClass class.""" 

15 return g.new_cmd_decorator(name, ['c', 'convertCommands',]) 

16 

17#@+<< class To_Python >> 

18#@+node:ekr.20150514063305.123: ** << class To_Python >> 

19class To_Python: # pragma: no cover 

20 """The base class for x-to-python commands.""" 

21 #@+others 

22 #@+node:ekr.20150514063305.125: *3* To_Python.ctor 

23 def __init__(self, c): 

24 """Ctor for To_Python class.""" 

25 self.c = c 

26 self.p = self.c.p.copy() 

27 aList = g.get_directives_dict_list(self.p) 

28 self.tab_width = g.scanAtTabwidthDirectives(aList) or 4 

29 #@+node:ekr.20150514063305.126: *3* To_Python.go 

30 def go(self): 

31 import time 

32 t1 = time.time() 

33 c = self.c 

34 u, undoType = c.undoer, 'typescript-to-python' 

35 pp = leoBeautify.CPrettyPrinter(c) 

36 u.beforeChangeGroup(c.p, undoType) 

37 changed = False 

38 n_files, n_nodes = 0, 0 

39 special = ('class ', 'module ', '@file ', '@@file ') 

40 files = ('@file ', '@@file ') 

41 for p in self.p.self_and_subtree(copy=False): 

42 if p.b: 

43 n_nodes += 1 

44 if any(p.h.startswith(z) for z in special): 

45 g.es_print(p.h) 

46 if any(p.h.startswith(z) for z in files): 

47 n_files += 1 

48 bunch = u.beforeChangeNodeContents(p) 

49 s = pp.indent(p, giveWarnings=False) 

50 aList = list(s) 

51 self.convertCodeList(aList) 

52 s = ''.join(aList) 

53 if s != p.b: 

54 p.b = s 

55 p.setDirty() 

56 u.afterChangeNodeContents(p, undoType, bunch) 

57 changed = True 

58 # Call this only once, at end. 

59 if changed: 

60 u.afterChangeGroup(c.p, undoType, reportFlag=False) 

61 t2 = time.time() 

62 g.es_print(f"done! {n_files} files, {n_nodes} nodes, {t2 - t1:2.2f} sec") 

63 #@+node:ekr.20150514063305.127: *3* To_Python.convertCodeList 

64 def convertCodeList(self, aList): 

65 """The main search/replace method.""" 

66 g.trace('must be defined in subclasses.') 

67 #@+node:ekr.20150514063305.128: *3* To_Python.Utils 

68 #@+node:ekr.20150514063305.129: *4* match... 

69 #@+node:ekr.20150514063305.130: *5* match 

70 def match(self, s, i, pat): 

71 """ 

72 Return True if s[i:] matches the pat string. 

73 

74 We can't use g.match because s is usually a list. 

75 """ 

76 assert pat 

77 j = 0 

78 while i + j < len(s) and j < len(pat): 

79 if s[i + j] == pat[j]: 

80 j += 1 

81 if j == len(pat): 

82 return True 

83 else: 

84 return False 

85 return False 

86 #@+node:ekr.20150514063305.131: *5* match_word 

87 def match_word(self, s, i, pat): 

88 """ 

89 Return True if s[i:] matches the pat string as a whole word. 

90 

91 We can't use g.match_word because s is usually a list 

92 and g.match_word uses s.find. 

93 """ 

94 if self.match(s, i, pat): 

95 j = i + len(pat) 

96 if j >= len(s): 

97 return True 

98 if not pat[-1].isalnum(): 

99 # Bug fix 10/16/2012: The pattern terminates the word. 

100 return True 

101 ch = s[j] 

102 return not ch.isalnum() and ch != '_' 

103 return False 
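
# Illustrative sketch (not part of the original source): the difference
# between match and match_word on a character list.
#
#   s = list('elsewhere')
#   self.match(s, 0, 'else')       # True: s[0:] starts with 'else'.
#   self.match_word(s, 0, 'else')  # False: the following 'w' is alphanumeric.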

104 #@+node:ekr.20150514063305.132: *4* insert_not 

105 def insert_not(self, aList): 

106 """Change "!" to "not" except before an equal sign.""" 

107 i = 0 

108 while i < len(aList): 

109 if self.is_string_or_comment(aList, i): 

110 i = self.skip_string_or_comment(aList, i) 

111 elif aList[i] == '!' and not self.match(aList, i + 1, '='): 

112 aList[i : i + 1] = list('not ') 

113 i += 4 

114 else: 

115 i += 1 
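
# Illustrative sketch (not part of the original source):
#
#   aList = list('if (!ok && x != y)')
#   self.insert_not(aList)
#   ''.join(aList)  # => 'if (not ok && x != y)'  ('!=' is left unchanged)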

116 #@+node:ekr.20150514063305.133: *4* is... 

117 #@+node:ekr.20150514063305.134: *5* is_section_def/ref 

118 def is_section_def(self, p): 

119 return self.is_section_ref(p.h) 

120 

121 def is_section_ref(self, s): 

122 n1 = s.find("<<", 0) 

123 n2 = s.find(">>", 0) 

124 return -1 < n1 < n2 and s[n1 + 2 : n2].strip() 

125 #@+node:ekr.20150514063305.135: *5* is_string_or_comment 

126 def is_string_or_comment(self, s, i): 

127 # Does range checking. 

128 m = self.match 

129 return m(s, i, "'") or m(s, i, '"') or m(s, i, "//") or m(s, i, "/*") 

130 #@+node:ekr.20150514063305.136: *5* is_ws and is_ws_or_nl 

131 def is_ws(self, ch): 

132 return ch in ' \t' 

133 

134 def is_ws_or_nl(self, ch): 

135 return ch in ' \t\n' 

136 #@+node:ekr.20150514063305.137: *4* prevNonWsChar and prevNonWsOrNlChar 

137 def prevNonWsChar(self, s, i): 

138 i -= 1 

139 while i >= 0 and self.is_ws(s[i]): 

140 i -= 1 

141 return i 

142 

143 def prevNonWsOrNlChar(self, s, i): 

144 i -= 1 

145 while i >= 0 and self.is_ws_or_nl(s[i]): 

146 i -= 1 

147 return i 

148 #@+node:ekr.20150514063305.138: *4* remove... 

149 #@+node:ekr.20150514063305.139: *5* removeBlankLines 

150 def removeBlankLines(self, aList): 

151 i = 0 

152 while i < len(aList): 

153 j = i 

154 while j < len(aList) and aList[j] in " \t": 

155 j += 1 

156 if j == len(aList) or aList[j] == '\n': 

157 del aList[i : j + 1] 

158 else: 

159 i = self.skip_past_line(aList, i) 

160 #@+node:ekr.20150514063305.140: *5* removeExcessWs 

161 def removeExcessWs(self, aList): 

162 i = 0 

163 i = self.removeExcessWsFromLine(aList, i) 

164 while i < len(aList): 

165 if self.is_string_or_comment(aList, i): 

166 i = self.skip_string_or_comment(aList, i) 

167 elif self.match(aList, i, '\n'): 

168 i += 1 

169 i = self.removeExcessWsFromLine(aList, i) 

170 else: i += 1 

171 #@+node:ekr.20150514063305.141: *5* removeExcessWsFromLine 

172 def removeExcessWsFromLine(self, aList, i): 

173 assert(i == 0 or aList[i - 1] == '\n') 

174 i = self.skip_ws(aList, i) 

175 # Retain the leading whitespace. 

176 while i < len(aList): 

177 if self.is_string_or_comment(aList, i): 

178 break # safe 

179 elif self.match(aList, i, '\n'): 

180 break 

181 elif self.match(aList, i, ' ') or self.match(aList, i, '\t'): 

182 # Replace all whitespace by one blank. 

183 j = self.skip_ws(aList, i) 

184 assert j > i 

185 aList[i:j] = [' '] 

186 i += 1 # make sure we don't go past a newline! 

187 else: i += 1 

188 return i 

189 #@+node:ekr.20150514063305.142: *5* removeMatchingBrackets 

190 def removeMatchingBrackets(self, aList, i): 

191 j = self.skip_to_matching_bracket(aList, i) 

192 if i < j < len(aList): 

193 c = aList[j] 

194 if c == ')' or c == ']' or c == '}': 

195 del aList[j : j + 1] 

196 del aList[i : i + 1] 

197 return j - 1 

198 return j + 1 

199 return j 

200 #@+node:ekr.20150514063305.143: *5* removeSemicolonsAtEndOfLines 

201 def removeSemicolonsAtEndOfLines(self, aList): 

202 i = 0 

203 while i < len(aList): 

204 if self.is_string_or_comment(aList, i): 

205 i = self.skip_string_or_comment(aList, i) 

206 elif aList[i] == ';': 

207 j = self.skip_ws(aList, i + 1) 

208 if ( 

209 j >= len(aList) or 

210 self.match(aList, j, '\n') or 

211 self.match(aList, j, '#') or 

212 self.match(aList, j, "//") 

213 ): 

214 del aList[i] 

215 else: i += 1 

216 else: i += 1 

217 #@+node:ekr.20150514063305.144: *5* removeTrailingWs 

218 def removeTrailingWs(self, aList): 

219 i = 0 

220 while i < len(aList): 

221 if self.is_ws(aList[i]): 

222 j = i 

223 i = self.skip_ws(aList, i) 

224 assert j < i 

225 if i >= len(aList) or aList[i] == '\n': 

226 # print "removing trailing ws:", `i-j` 

227 del aList[j:i] 

228 i = j 

229 else: i += 1 

230 #@+node:ekr.20150514063305.145: *4* replace... & safe_replace 

231 #@+node:ekr.20150514063305.146: *5* replace 

232 def replace(self, aList, findString, changeString): 

233 """ 

234 Replaces all occurrences of findString by changeString. 

235 changeString may be the empty string, but not None. 

236 """ 

237 if not findString: 

238 return 

239 changeList = list(changeString) 

240 i = 0 

241 while i < len(aList): 

242 if self.match(aList, i, findString): 

243 aList[i : i + len(findString)] = changeList 

244 i += len(changeList) 

245 else: 

246 i += 1 

247 #@+node:ekr.20150514063305.147: *5* replaceComments 

248 def replaceComments(self, aList): 

249 i = 0 

250 while i < len(aList): 

251 # Loop invariant: j > progress at end. 

252 progress = i 

253 if self.match(aList, i, "//"): 

254 aList[i : i + 2] = ['#'] 

255 j = self.skip_past_line(aList, i) 

256 elif self.match(aList, i, "/*"): 

257 j = self.skip_c_block_comment(aList, i) 

258 k = i 

259 while k - 1 >= 0 and aList[k - 1] in ' \t': 

260 k -= 1 

261 assert k == 0 or aList[k - 1] not in ' \t' 

262 lws = ''.join(aList[k:i]) 

263 comment_body = ''.join(aList[i + 2 : j - 2]) 

264 comment_lines = g.splitLines(lws + comment_body) 

265 comment_lines = self.munge_block_comment(comment_lines) 

266 comment = '\n'.join(comment_lines) # A list of lines. 

267 comment_list = list(comment) # A list of characters. 

268 aList[k:j] = comment_list 

269 j = k + len(comment_list) 

270 progress = j - 1 # Disable the check below. 

271 elif self.match(aList, i, '"') or self.match(aList, i, "'"): 

272 j = self.skip_string(aList, i) 

273 else: 

274 j = i + 1 

275 # Defensive programming. 

276 if j == progress: 

277 j += 1 

278 assert j > progress 

279 i = j 

280 #@+node:ekr.20150514063305.148: *6* munge_block_comment 

281 def munge_block_comment(self, comment_lines): 

282 

283 n = len(comment_lines) 

284 assert n > 0 

285 s = comment_lines[0] 

286 junk, w = g.skip_leading_ws_with_indent(s, 0, tab_width=4) 

287 if n == 1: 

288 return [f"{' ' * (w - 1)}# {s.strip()}"] 

289 junk, w = g.skip_leading_ws_with_indent(s, 0, tab_width=4) 

290 result = [] 

291 for i, s in enumerate(comment_lines): 

292 if s.strip(): 

293 result.append(f"{' ' * w}# {s.strip()}") 

294 elif i == n - 1: 

295 pass # Omit the line entirely. 

296 else: 

297 result.append('') # Add a blank line 

298 return result 

299 #@+node:ekr.20150514063305.149: *5* replaceSectionDefs 

300 def replaceSectionDefs(self, aList): 

301 """Replaces < < x > > = by @c (at the start of lines).""" 

302 if not aList: 

303 return 

304 i = 0 

305 j = self.is_section_def(aList[i]) 

306 if j > 0: 

307 aList[i:j] = list("@c ") 

308 while i < len(aList): 

309 if self.is_string_or_comment(aList, i): 

310 i = self.skip_string_or_comment(aList, i) 

311 elif self.match(aList, i, "\n"): 

312 i += 1 

313 j = self.is_section_def(aList[i]) 

314 if j > i: 

315 aList[i:j] = list("@c ") 

316 else: i += 1 

317 #@+node:ekr.20150514063305.150: *5* safe_replace 

318 def safe_replace(self, aList, findString, changeString): 

319 """ 

320 Replaces occurrences of findString by changeString, 

321 but only outside of C comments and strings. 

322 changeString may be the empty string, but not None. 

323 """ 

324 if not findString: 

325 return 

326 changeList = list(changeString) 

327 i = 0 

328 if findString[0].isalpha(): # use self.match_word 

329 while i < len(aList): 

330 if self.is_string_or_comment(aList, i): 

331 i = self.skip_string_or_comment(aList, i) 

332 elif self.match_word(aList, i, findString): 

333 aList[i : i + len(findString)] = changeList 

334 i += len(changeList) 

335 else: 

336 i += 1 

337 else: #use self.match 

338 while i < len(aList): 

339 if self.match(aList, i, findString): 

340 aList[i : i + len(findString)] = changeList 

341 i += len(changeList) 

342 else: 

343 i += 1 
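
# Illustrative sketch (not part of the original source): word-oriented
# replacement that leaves strings, comments and longer identifiers alone.
#
#   aList = list('this->x = this_var; // this stays')
#   self.safe_replace(aList, 'this', 'self')
#   ''.join(aList)  # => 'self->x = this_var; // this stays'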

344 #@+node:ekr.20150514063305.151: *4* skip 

345 #@+node:ekr.20150514063305.152: *5* skip_c_block_comment 

346 def skip_c_block_comment(self, s, i): 

347 assert self.match(s, i, "/*") 

348 i += 2 

349 while i < len(s): 

350 if self.match(s, i, "*/"): 

351 return i + 2 

352 i += 1 

353 return i 

354 #@+node:ekr.20150514063305.153: *5* skip_line 

355 def skip_line(self, s, i): 

356 while i < len(s) and s[i] != '\n': 

357 i += 1 

358 return i 

359 #@+node:ekr.20150514063305.154: *5* skip_past_line 

360 def skip_past_line(self, s, i): 

361 while i < len(s) and s[i] != '\n': 

362 i += 1 

363 if i < len(s) and s[i] == '\n': 

364 i += 1 

365 return i 

366 #@+node:ekr.20150514063305.155: *5* skip_past_word 

367 def skip_past_word(self, s, i): 

368 assert s[i].isalpha() or s[i] == '~' 

369 # Kludge: this helps recognize dtors. 

370 if s[i] == '~': 

371 i += 1 

372 while i < len(s): 

373 ch = s[i] 

374 if ch.isalnum() or ch == '_': 

375 i += 1 

376 else: 

377 break 

378 return i 

379 #@+node:ekr.20150514063305.156: *5* skip_string 

380 def skip_string(self, s, i): 

381 delim = s[i] # handle either single or double-quoted strings 

382 assert delim == '"' or delim == "'" 

383 i += 1 

384 while i < len(s): 

385 if s[i] == delim: 

386 return i + 1 

387 if s[i] == '\\': 

388 i += 2 

389 else: 

390 i += 1 

391 return i 

392 #@+node:ekr.20150514063305.157: *5* skip_string_or_comment 

393 def skip_string_or_comment(self, s, i): 

394 if self.match(s, i, "'") or self.match(s, i, '"'): 

395 j = self.skip_string(s, i) 

396 elif self.match(s, i, "//"): 

397 j = self.skip_past_line(s, i) 

398 elif self.match(s, i, "/*"): 

399 j = self.skip_c_block_comment(s, i) 

400 else: 

401 assert False 

402 return j 

403 #@+node:ekr.20150514063305.158: *5* skip_to_matching_bracket 

404 def skip_to_matching_bracket(self, s, i): 

405 ch = s[i] 

406 if ch == '(': 

407 delim = ')' 

408 elif ch == '{': 

409 delim = '}' 

410 elif ch == '[': 

411 delim = ']' 

412 else: 

413 assert False 

414 i += 1 

415 while i < len(s): 

416 ch = s[i] 

417 if self.is_string_or_comment(s, i): 

418 i = self.skip_string_or_comment(s, i) 

419 elif ch == delim: 

420 return i 

421 elif ch == '(' or ch == '[' or ch == '{': 

422 i = self.skip_to_matching_bracket(s, i) 

423 i += 1 # skip the closing bracket. 

424 else: i += 1 

425 return i 

426 #@+node:ekr.20150514063305.159: *5* skip_ws and skip_ws_and_nl 

427 def skip_ws(self, aList, i): 

428 while i < len(aList): 

429 c = aList[i] 

430 if c == ' ' or c == '\t': 

431 i += 1 

432 else: break 

433 return i 

434 

435 def skip_ws_and_nl(self, aList, i): 

436 while i < len(aList): 

437 c = aList[i] 

438 if c == ' ' or c == '\t' or c == '\n': 

439 i += 1 

440 else: break 

441 return i 

442 #@-others 

443#@-<< class To_Python >> 

444 

445#@+others 

446#@+node:ekr.20210830070921.1: ** function: convert_at_test_nodes 

447def convert_at_test_nodes(c, converter, root, copy_tree=False): # pragma: no cover 

448 """ 

449 Use converter.convert_node() to convert all the @test nodes in the 

450 root's tree to children of a new last top-level node. 

451 """ 

452 if not root: 

453 print('no root') 

454 return 

455 last = c.lastTopLevel() 

456 target = last.insertAfter() 

457 target.h = 'Converted nodes' 

458 count = 0 

459 for p in root.subtree(): 

460 if p.h.startswith('@test'): 

461 converter.convert_node(c, p, target) 

462 if copy_tree and p.hasChildren(): 

463 converter.copy_children(c, p, target.lastChild()) 

464 count += 1 

465 target.expand() 

466 c.redraw(target) 

467 print(f"converted {count} @test nodes") 
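
# Illustrative usage sketch (not part of the original source). The converter
# is any object that supplies convert_node(c, p, target) and, when copy_tree
# is True, copy_children(c, p, parent). MyTestConverter is hypothetical:
#
#   convert_at_test_nodes(c, MyTestConverter(), root=c.p, copy_tree=True)
#
# Every @test node in root's subtree becomes a child of a new last
# top-level 'Converted nodes' node.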

468#@+node:ekr.20160316111303.1: ** class ConvertCommandsClass 

469class ConvertCommandsClass(BaseEditCommandsClass): 

470 """Leo's file-conversion commands""" 

471 

472 def __init__(self, c): 

473 """Ctor for EditCommandsClass class.""" 

474 # pylint: disable=super-init-not-called 

475 self.c = c 

476 

477 #@+others 

478 #@+node:ekr.20220105151235.1: *3* ccc.add-mypy-annotations 

479 @cmd('add-mypy-annotations') 

480 def addMypyAnnotations(self, event): # pragma: no cover 

481 """ 

482 The add-mypy-annotations command adds mypy annotations to function and 

483 method definitions based on naming conventions. 

484  

485 To use, select an @<file> node for a python external file and execute 

486 add-mypy-annotations. The command rewrites the @<file> tree, adding 

487 mypy annotations for untyped function/method arguments. 

488 

489 The command attempts no type analysis. It uses "Any" as the return type of 

490 functions and methods that do not specify one. As a special 

491 case, the return type of __init__ methods is "None". 

492 

493 @data add-mypy-annotations in leoSettings.leo contains a list of 

494 key/value pairs. Keys are argument names (as used in Leo); values are 

495 mypy type names. 

496  

497 This command adds annotations for kwargs that have a constant initial 

498 value. 

499 """ 

500 self.Add_Mypy_Annotations(self.c).add_annotations() 

501 self.c.bodyWantsFocus() 
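
# Illustrative sketch (not part of the original source): the body of the
# @data add-mypy-annotations node contains one "name,type" pair per line,
# split on the first comma by init_types_d below. For example:
#
#   c,Cmdr
#   p,Position
#   s,str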

502 #@+node:ekr.20220105152521.1: *4* class Add_Mypy_Annotations 

503 class Add_Mypy_Annotations: 

504 

505 """A class that implements the add-mypy-annotations command.""" 

506 

507 changed_lines = 0 

508 tag = 'add-mypy-annotations' 

509 types_d: Dict[str, str] = {} # Keys are argument names. Values are mypy types. 

510 

511 def __init__(self, c): 

512 self.c = c 

513 

514 #@+others 

515 #@+node:ekr.20220105154019.1: *5* ama.init_types_d 

516 def init_types_d(self): # pragma: no cover 

517 """Init the annotations dict.""" 

518 c, d, tag = self.c, self.types_d, self.tag 

519 data = c.config.getData(tag) 

520 if not data: 

521 print(f"@data {tag} not found") 

522 return 

523 for s in data: 

524 try: 

525 key, val = s.split(',', 1) 

526 if key in d: 

527 print(f"{tag}: ignoring duplicate key: {s!r}") 

528 else: 

529 d [key] = val.strip() 

530 except ValueError: 

531 print(f"{tag}: ignoring invalid key/value pair: {s!r}") 

532 #@+node:ekr.20220105154158.1: *5* ama.add_annotations 

533 def add_annotations(self): # pragma: no cover 

534 

535 c, p, tag = self.c, self.c.p, self.tag 

536 # Checks. 

537 if not p.isAnyAtFileNode(): 

538 g.es_print(f"{tag}: not an @file node: {p.h}") 

539 return 

540 if not p.h.endswith('.py'): 

541 g.es_print(f"{tag}: not a python file: {p.h}") 

542 return 

543 # Init. 

544 self.init_types_d() 

545 if not self.types_d: 

546 print(f"{self.tag}: no types given") 

547 return 

548 try: 

549 # Convert p and (recursively) all its descendants. 

550 self.convert_node(p) 

551 # Redraw. 

552 c.expandAllSubheads(p) 

553 c.treeWantsFocusNow() 

554 except Exception: 

555 g.es_exception() 

556 #@+node:ekr.20220105155837.4: *5* ama.convert_node 

557 def convert_node(self, p): # pragma: no cover 

558 # Convert p.b into child.b 

559 self.convert_body(p) 

560 # Recursively create all descendants. 

561 for child in p.children(): 

562 self.convert_node(child) 

563 #@+node:ekr.20220105173331.1: *5* ama.convert_body  

564 def convert_body(self, p): 

565 """Convert p.b in place.""" 

566 c = self.c 

567 if not p.b.strip(): 

568 return # pragma: no cover 

569 s = self.def_pat.sub(self.do_def, p.b) 

570 if p.b != s: 

571 self.changed_lines += 1 

572 if not g.unitTesting: 

573 print(f"changed {p.h}") # pragma: no cover 

574 p.setDirty() 

575 c.setChanged() 

576 p.b = s 

577 #@+node:ekr.20220105174453.1: *5* ama.do_def 

578 def_pat = re.compile(r'^([ \t]*)def[ \t]+(\w+)\s*\((.*?)\)(.*?):(.*?)\n', re.MULTILINE + re.DOTALL) 

579 

580 def do_def(self, m): 

581 lws, name, args, return_val, tail = m.group(1), m.group(2), m.group(3), m.group(4), m.group(5) 

582 args = self.do_args(args) 

583 if not return_val.strip(): 

584 val_s = 'None' if name == '__init__' else 'Any' 

585 return_val = f" -> {val_s}" 

586 if not tail.strip(): 

587 tail = '' 

588 return f"{lws}def {name}({args}){return_val}:{tail}\n" 

589 #@+node:ekr.20220105174453.2: *5* ama.do_args 

590 arg_pat = re.compile(r'(\s*[\*\w]+\s*)([:,=])?') 

591 comment_pat = re.compile(r'(\s*#.*?\n)') 

592 

593 def do_args(self, args): 

594 """Add type annotations.""" 

595 multiline = '\n' in args.strip() 

596 comma = ',\n' if multiline else ', ' 

597 lws = ' '*4 if multiline else '' 

598 result: List[str] = [] 

599 i = 0 

600 while i < len(args): 

601 rest = args[i:] 

602 if not rest.strip(): 

603 break 

604 # Handle comments following arguments. 

605 if multiline and result: 

606 m = self.comment_pat.match(rest) 

607 if m: 

608 comment = m.group(0) 

609 i += len(comment) 

610 last = result.pop() 

611 result.append(f"{last.rstrip()} {comment.strip()}\n") 

612 continue 

613 m = self.arg_pat.match(rest) 

614 if not m: # pragma: no cover 

615 g.trace('==== bad args', i, repr(rest)) 

616 return args 

617 name1, tail = m.group(1), m.group(2) 

618 name = name1.strip() 

619 i += len(name1) 

620 if name == 'self': 

621 # Don't annotate self. 

622 result.append(f"{lws}{name}{comma}") 

623 if i < len(args) and args[i] == ',': 

624 i += 1 

625 elif tail == ':': 

626 arg, i = self.find_arg(args, i) 

627 result.append(f"{lws}{name}: {arg}{comma}") 

628 elif tail == '=': 

629 arg, i = self.find_arg(args, i) 

630 kind = self.kind(arg) 

631 result.append(f"{lws}{name}: {kind}={arg}{comma}") 

632 elif tail == ',': 

633 kind = self.types_d.get(name.strip(), 'Any') 

634 result.append(f"{lws}{name}: {kind}{comma}") 

635 i += 1 

636 else: 

637 kind = self.types_d.get(name.strip(), 'Any') 

638 result.append(f"{lws}{name}: {kind}{comma}") 

639 s = ''.join(result) 

640 if multiline: 

641 s = '\n' + s 

642 if not multiline and s.endswith(', '): 

643 s = s[:-2] 

644 return s 

645 #@+node:ekr.20220105190332.1: *5* ama.find_arg 

646 def find_arg(self, s, i): 

647 """ 

648 Scan over type annotations or initializers. 

649  

650 Return (arg, j), where j is the index of the character following the argument that starts at s[i]. 

651 """ 

652 assert s[i] in ':=', (i, s[i], s) 

653 i += 1 

654 while i < len(s) and s[i] == ' ': 

655 i += 1 

656 i1 = i 

657 level = 0 # Assume balanced parens, brackets and strings. 

658 while i < len(s): 

659 ch = s[i] 

660 i += 1 

661 if ch in '[{(': 

662 level += 1 

663 elif ch in ']}': 

664 level -= 1 

665 elif ch in '\'"': 

666 i = g.skip_python_string(s, i - 1) 

667 elif ch == ',' and level == 0: 

668 # Skip the comma, but don't include it in the result. 

669 break 

670 assert level == 0, (level, i == len(s), s) 

671 result = s[i1 : i].strip() 

672 if result.endswith(','): 

673 result = result[:-1].strip() 

674 return result, i 

675 #@+node:ekr.20220105222028.1: *5* ama.kind 

676 bool_pat = re.compile(r'(True|False)') 

677 float_pat = re.compile(r'[0-9]*\.[0-9]*') 

678 int_pat = re.compile(r'[0-9]+') 

679 none_pat = re.compile(r'None') 

680 string_pat = re.compile(r'[\'"].*[\'"]') 

681 

682 def kind(self, s): 

683 """Return the kind of the initial value s.""" 

684 if self.bool_pat.match(s): 

685 return 'bool' 

686 if self.float_pat.match(s): 

687 return 'float' 

688 if self.int_pat.match(s): 

689 return 'int' 

690 if self.none_pat.match(s): 

691 return 'Any' 

692 if self.string_pat.match(s): 

693 return 'str' 

694 return 'Any' # pragma: no cover 
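
# Illustrative sketch (not part of the original source):
#
#   self.kind('True')    # => 'bool'
#   self.kind('3.14')    # => 'float'
#   self.kind('42')      # => 'int'
#   self.kind("'abc'")   # => 'str'
#   self.kind('None')    # => 'Any'  (None carries no useful type)
#   self.kind('x + 1')   # => 'Any'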

695 #@-others 

696 #@+node:ekr.20160316091843.1: *3* ccc.c-to-python 

697 @cmd('c-to-python') 

698 def cToPy(self, event): # pragma: no cover 

699 """ 

700 The c-to-python command converts C or C++ text to Python text. 

701 The conversion is not perfect, but it eliminates a lot of tedious 

702 text manipulation. 

703 """ 

704 self.C_To_Python(self.c).go() 

705 self.c.bodyWantsFocus() 

706 #@+node:ekr.20150514063305.160: *4* class C_To_Python (To_Python) 

707 class C_To_Python(To_Python): # pragma: no cover 

708 #@+others 

709 #@+node:ekr.20150514063305.161: *5* ctor & helpers (C_To_Python) 

710 def __init__(self, c): 

711 """Ctor for C_To_Python class.""" 

712 super().__init__(c) 

713 # 

714 # Internal state... 

715 self.class_name = '' 

716 # The class name for the present function. Used to modify ivars. 

717 self.ivars = [] 

718 # List of ivars to be converted to self.ivar 

719 self.get_user_types() 

720 #@+node:ekr.20150514063305.162: *6* get_user_types (C_To_Python) 

721 def get_user_types(self): 

722 c = self.c 

723 self.class_list = c.config.getData('c-to-python-class-list') or [] 

724 self.type_list = ( 

725 c.config.getData('c-to-python-type-list') or 

726 ["char", "void", "short", "long", "int", "double", "float"] 

727 ) 

728 aList = c.config.getData('c-to-python-ivars-dict') 

729 if aList: 

730 self.ivars_dict = self.parse_ivars_data(aList) 

731 else: 

732 self.ivars_dict = {} 

733 #@+node:ekr.20150514063305.163: *6* parse_ivars_data 

734 def parse_ivars_data(self, aList): 

735 d: Dict[str, List[str]] = {} 

736 key = None 

737 aList = [z.strip() for z in aList if z.strip()] 

738 for s in aList: 

739 if s.endswith(':'): 

740 key = s[:-1].strip() 

741 elif key: 

742 ivars = [z.strip() for z in s.split(',') if z.strip()] 

743 aList = d.get(key, []) 

744 aList.extend(ivars) 

745 d[key] = aList 

746 else: 

747 g.error('invalid @data c-to-python-ivars-dict', repr(s)) 

748 return {} 

749 return d 

750 #@+node:ekr.20150514063305.164: *5* convertCodeList (C_To_Python) & helpers 

751 def convertCodeList(self, aList): 

752 r, sr = self.replace, self.safe_replace 

753 # First... 

754 r(aList, "\r", '') 

755 # self.convertLeadingBlanks(aList) # Now done by indent. 

756 # if leoFlag: replaceSectionDefs(aList) 

757 self.mungeAllFunctions(aList) 

758 # Next... 

759 if 1: 

760 # CC2 stuff: 

761 sr(aList, "TRACEPB", "if trace: g.trace") 

762 sr(aList, "TRACEPN", "if trace: g.trace") 

763 sr(aList, "TRACEPX", "if trace: g.trace") 

764 sr(aList, "TICKB", "if trace: g.trace") 

765 sr(aList, "TICKN", "if trace: g.trace") 

766 sr(aList, "TICKX", "if trace: g.trace") 

767 sr(aList, "g.trace(ftag,", "g.trace(") 

768 sr(aList, "ASSERT_TRACE", "assert") 

769 sr(aList, "ASSERT", "assert") 

770 sr(aList, " -> ", '.') 

771 sr(aList, "->", '.') 

772 sr(aList, " . ", '.') 

773 sr(aList, "this.self", "self") 

774 sr(aList, "{", '') 

775 sr(aList, "}", '') 

776 sr(aList, "#if", "if") 

777 sr(aList, "#else", "else") 

778 sr(aList, "#endif", '') 

779 sr(aList, "else if", "elif") 

780 sr(aList, "else", "else:") 

781 sr(aList, "&&", " and ") 

782 sr(aList, "||", " or ") 

783 sr(aList, "TRUE", "True") 

784 sr(aList, "FALSE", "False") 

785 sr(aList, "NULL", "None") 

786 sr(aList, "this", "self") 

787 sr(aList, "try", "try:") 

788 sr(aList, "catch", "except:") 

789 # if leoFlag: sr(aList, "@code", "@c") 

790 # Next... 

791 self.handle_all_keywords(aList) 

792 self.insert_not(aList) 

793 self.removeSemicolonsAtEndOfLines(aList) 

794 # after processing for keywords 

795 # Last... 

796 # if firstPart and leoFlag: removeLeadingAtCode(aList) 

797 self.removeBlankLines(aList) 

798 self.removeExcessWs(aList) 

799 # your taste may vary: in Python I don't like extra whitespace 

800 sr(aList, " :", ":") 

801 sr(aList, ", ", ",") 

802 sr(aList, " ,", ",") 

803 sr(aList, " (", "(") 

804 sr(aList, "( ", "(") 

805 sr(aList, " )", ")") 

806 sr(aList, ") ", ")") 

807 sr(aList, "@language c", "@language python") 

808 self.replaceComments(aList) # should follow all calls to safe_replace 

809 self.removeTrailingWs(aList) 

810 r(aList, "\t ", "\t") # happens when deleting declarations. 

811 #@+node:ekr.20150514063305.165: *6* handle_all_keywords 

812 def handle_all_keywords(self, aList): 

813 """ 

814 converts if ( x ) to if x: 

815 converts while ( x ) to while x: 

816 """ 

817 i = 0 

818 while i < len(aList): 

819 if self.is_string_or_comment(aList, i): 

820 i = self.skip_string_or_comment(aList, i) 

821 elif ( 

822 self.match_word(aList, i, "if") or 

823 self.match_word(aList, i, "while") or 

824 self.match_word(aList, i, "for") or 

825 self.match_word(aList, i, "elif") 

826 ): 

827 i = self.handle_keyword(aList, i) 

828 else: 

829 i += 1 

830 # print "handAllKeywords2:", ''.join(aList) 

831 #@+node:ekr.20150514063305.166: *7* handle_keyword 

832 def handle_keyword(self, aList, i): 

833 if self.match_word(aList, i, "if"): 

834 i += 2 

835 elif self.match_word(aList, i, "elif"): 

836 i += 4 

837 elif self.match_word(aList, i, "while"): 

838 i += 5 

839 elif self.match_word(aList, i, "for"): 

840 i += 3 

841 else: 

842 assert False 

843 # Make sure one space follows the keyword. 

844 k = i 

845 i = self.skip_ws(aList, i) 

846 if k == i: 

847 c = aList[i] 

848 aList[i : i + 1] = [' ', c] 

849 i += 1 

850 # Remove '(' and matching ')' and add a ':' 

851 if aList[i] == "(": 

852 # Look ahead. Don't remove if we span a line. 

853 j = self.skip_to_matching_bracket(aList, i) 

854 k = i 

855 found = False 

856 while k < j and not found: 

857 found = aList[k] == '\n' 

858 k += 1 

859 if not found: 

860 j = self.removeMatchingBrackets(aList, i) 

861 if i < j < len(aList): 

862 ch = aList[j] 

863 aList[j : j + 1] = [ch, ":", " "] 

864 j = j + 2 

865 return j 

866 return i 

867 #@+node:ekr.20150514063305.167: *6* mungeAllFunctions 

868 def mungeAllFunctions(self, aList): 

869 """Scan for a '{' at the top level that is preceeded by ')' """ 

870 prevSemi = 0 # Previous semicolon: header contains all previous text 

871 i = 0 

872 firstOpen = None 

873 while i < len(aList): 

874 progress = i 

875 if self.is_string_or_comment(aList, i): 

876 j = self.skip_string_or_comment(aList, i) 

877 prevSemi = j 

878 elif self.match(aList, i, '('): 

879 if not firstOpen: 

880 firstOpen = i 

881 j = i + 1 

882 elif self.match(aList, i, '#'): 

883 # At this point, it is a preprocessor directive. 

884 j = self.skip_past_line(aList, i) 

885 prevSemi = j 

886 elif self.match(aList, i, ';'): 

887 j = i + 1 

888 prevSemi = j 

889 elif self.match(aList, i, "{"): 

890 j = self.handlePossibleFunctionHeader(aList, i, prevSemi, firstOpen) 

891 prevSemi = j 

892 firstOpen = None # restart the scan 

893 else: 

894 j = i + 1 

895 # Handle unusual cases. 

896 if j <= progress: 

897 j = progress + 1 

898 assert j > progress 

899 i = j 

900 #@+node:ekr.20150514063305.168: *7* handlePossibleFunctionHeader 

901 def handlePossibleFunctionHeader(self, aList, i, prevSemi, firstOpen): 

902 """ 

903 Converts function header lines from c++ format to python format. 

904 That is, converts 

905 x1..nn w::y ( t1 z1,..tn zn) { 

906 to 

907 def y (z1,..zn): { 

908 """ 

909 assert self.match(aList, i, "{") 

910 prevSemi = self.skip_ws_and_nl(aList, prevSemi) 

911 close = self.prevNonWsOrNlChar(aList, i) 

912 if close < 0 or aList[close] != ')': 

913 # Should not increase *Python* indent. 

914 return 1 + self.skip_to_matching_bracket(aList, i) 

915 if not firstOpen: 

916 return 1 + self.skip_to_matching_bracket(aList, i) 

917 close2 = self.skip_to_matching_bracket(aList, firstOpen) 

918 if close2 != close: 

919 return 1 + self.skip_to_matching_bracket(aList, i) 

920 open_paren = firstOpen 

921 assert aList[open_paren] == '(' 

922 head = aList[prevSemi:open_paren] 

923 # do nothing if the head starts with "if", "for" or "while" 

924 k = self.skip_ws(head, 0) 

925 if k >= len(head) or not head[k].isalpha(): 

926 return 1 + self.skip_to_matching_bracket(aList, i) 

927 kk = self.skip_past_word(head, k) 

928 if kk > k: 

929 headString = ''.join(head[k:kk]) 

930 # C keywords that might be followed by '{' 

931 # print "headString:", headString 

932 if headString in [ 

933 "class", "do", "for", "if", "struct", "switch", "while"]: 

934 return 1 + self.skip_to_matching_bracket(aList, i) 

935 args = aList[open_paren : close + 1] 

936 k = 1 + self.skip_to_matching_bracket(aList, i) 

937 body = aList[close + 1 : k] 

938 head = self.massageFunctionHead(head) 

939 args = self.massageFunctionArgs(args) 

940 body = self.massageFunctionBody(body) 

941 result = [] 

942 if head: 

943 result.extend(head) 

944 if args: 

945 result.extend(args) 

946 if body: 

947 result.extend(body) 

948 aList[prevSemi:k] = result 

949 return prevSemi + len(result) 

950 #@+node:ekr.20150514063305.170: *7* massageFunctionHead (sets .class_name) 

951 def massageFunctionHead(self, head): 

952 result: List[Any] = [] 

953 prevWord = [] 

954 self.class_name = '' 

955 i = 0 

956 while i < len(head): 

957 i = self.skip_ws_and_nl(head, i) 

958 if i < len(head) and head[i].isalpha(): 

959 result = [] 

960 j = self.skip_past_word(head, i) 

961 prevWord = head[i:j] 

962 i = j 

963 # look for ::word2 

964 i = self.skip_ws(head, i) 

965 if self.match(head, i, "::"): 

966 # Set the global to the class name. 

967 self.class_name = ''.join(prevWord) 

968 # print("class name:", self.class_name) 

969 i = self.skip_ws(head, i + 2) 

970 if i < len(head) and (head[i] == '~' or head[i].isalpha()): 

971 j = self.skip_past_word(head, i) 

972 if head[i:j] == prevWord: 

973 result.extend('__init__') 

974 elif head[i] == '~' and head[i + 1 : j] == prevWord: 

975 result.extend('__del__') 

976 else: 

977 # result.extend(list('::')) 

978 result.extend(head[i:j]) 

979 i = j 

980 else: 

981 result.extend(prevWord) 

982 else: i += 1 

983 finalResult = list("def ") 

984 finalResult.extend(result) 

985 return finalResult 

986 #@+node:ekr.20150514063305.169: *7* massageFunctionArgs 

987 def massageFunctionArgs(self, args): 

988 assert args[0] == '(' 

989 assert args[-1] == ')' 

990 result = ['('] 

991 lastWord = [] 

992 if self.class_name: 

993 for item in list("self,"): 

994 result.append(item)  # May add an extra comma; removed below if spurious. 

995 i = 1 

996 while i < len(args): 

997 i = self.skip_ws_and_nl(args, i) 

998 ch = args[i] 

999 if ch.isalpha(): 

1000 j = self.skip_past_word(args, i) 

1001 lastWord = args[i:j] 

1002 i = j 

1003 elif ch == ',' or ch == ')': 

1004 for item in lastWord: 

1005 result.append(item) 

1006 if lastWord and ch == ',': 

1007 result.append(',') 

1008 lastWord = [] 

1009 i += 1 

1010 else: i += 1 

1011 if result[-1] == ',': 

1012 del result[-1] 

1013 result.append(')') 

1014 result.append(':') 

1015 # print "new args:", ''.join(result) 

1016 return result 

1017 #@+node:ekr.20150514063305.171: *7* massageFunctionBody & helpers 

1018 def massageFunctionBody(self, body): 

1019 body = self.massageIvars(body) 

1020 body = self.removeCasts(body) 

1021 body = self.removeTypeNames(body) 

1022 body = self.dedentBlocks(body) 

1023 return body 

1024 #@+node:ekr.20150514063305.172: *8* dedentBlocks 

1025 def dedentBlocks(self, body): 

1026 """Look for '{' preceded by '{' or '}' or ';' 

1027 (with intervening whitespace and comments). 

1028 """ 

1029 i = 0 

1030 while i < len(body): 

1031 j = i 

1032 ch = body[i] 

1033 if self.is_string_or_comment(body, i): 

1034 j = self.skip_string_or_comment(body, i) 

1035 elif ch in '{};': 

1036 # Look ahead for '{' 

1037 j += 1 

1038 while True: 

1039 k = j 

1040 j = self.skip_ws_and_nl(body, j) 

1041 if self.is_string_or_comment(body, j): 

1042 j = self.skip_string_or_comment(body, j) 

1043 if k == j: 

1044 break 

1045 assert k < j 

1046 if self.match(body, j, '{'): 

1047 k = j 

1048 j = self.skip_to_matching_bracket(body, j) 

1049 m = '# <Start dedented block>...' 

1050 body[k : k + 1] = list(m) 

1051 j += len(m) 

1052 while k < j: 

1053 progress = k 

1054 if body[k] == '\n': 

1055 k += 1 

1056 spaces = 0 

1057 while spaces < 4 and k < j: 

1058 if body[k] == ' ': 

1059 spaces += 1 

1060 k += 1 

1061 else: 

1062 break 

1063 if spaces > 0: 

1064 del body[k - spaces : k] 

1065 k -= spaces 

1066 j -= spaces 

1067 else: 

1068 k += 1 

1069 assert progress < k 

1070 m = ' # <End dedented block>' 

1071 body[j : j + 1] = list(m) 

1072 j += len(m) 

1073 else: 

1074 j = i + 1 

1075 # Defensive programming. 

1076 if i == j: 

1077 j += 1 

1078 assert i < j 

1079 i = j 

1080 return body 

1081 #@+node:ekr.20150514063305.173: *8* massageIvars 

1082 def massageIvars(self, body): 

1083 ivars = self.ivars_dict.get(self.class_name, []) 

1084 i = 0 

1085 while i < len(body): 

1086 if self.is_string_or_comment(body, i): 

1087 i = self.skip_string_or_comment(body, i) 

1088 elif body[i].isalpha(): 

1089 j = self.skip_past_word(body, i) 

1090 word = ''.join(body[i:j]) 

1091 # print "looking up:", word 

1092 if word in ivars: 

1093 # replace word by self.word 

1094 # print "replacing", word, " by self.", word 

1095 word = "self." + word 

1096 word = list(word) # type:ignore 

1097 body[i:j] = word 

1098 delta = len(word) - (j - i) 

1099 i = j + delta 

1100 else: i = j 

1101 else: i += 1 

1102 return body 

1103 #@+node:ekr.20150514063305.174: *8* removeCasts 

1104 def removeCasts(self, body): 

1105 i = 0 

1106 while i < len(body): 

1107 if self.is_string_or_comment(body, i): 

1108 i = self.skip_string_or_comment(body, i) 

1109 elif self.match(body, i, '('): 

1110 start = i 

1111 i = self.skip_ws(body, i + 1) 

1112 if body[i].isalpha(): 

1113 j = self.skip_past_word(body, i) 

1114 word = ''.join(body[i:j]) 

1115 i = j 

1116 if word in self.class_list or word in self.type_list: 

1117 i = self.skip_ws(body, i) 

1118 while self.match(body, i, '*'): 

1119 i += 1 

1120 i = self.skip_ws(body, i) 

1121 if self.match(body, i, ')'): 

1122 i += 1 

1123 # print "removing cast:", ''.join(body[start:i]) 

1124 del body[start:i] 

1125 i = start 

1126 else: i += 1 

1127 return body 

1128 #@+node:ekr.20150514063305.175: *8* removeTypeNames 

1129 def removeTypeNames(self, body): 

1130 """Do _not_ remove type names when preceeded by new.""" 

1131 i = 0 

1132 while i < len(body): 

1133 if self.is_string_or_comment(body, i): 

1134 i = self.skip_string_or_comment(body, i) 

1135 elif self.match_word(body, i, "new"): 

1136 i = self.skip_past_word(body, i) 

1137 i = self.skip_ws(body, i) 

1138 # don't remove what follows new. 

1139 if body[i].isalpha(): 

1140 i = self.skip_past_word(body, i) 

1141 elif body[i].isalpha(): 

1142 j = self.skip_past_word(body, i) 

1143 word = ''.join(body[i:j]) 

1144 if word in self.class_list or word in self.type_list: 

1145 j = self.skip_ws(body, j) 

1146 while self.match(body, j, '*'): 

1147 j += 1 

1148 # print "Deleting type name:", ''.join(body[i:j]) 

1149 j = self.skip_ws(body, j) 

1150 del body[i:j] 

1151 else: 

1152 i = j 

1153 else: i += 1 

1154 return body 

1155 #@-others 

1156 #@+node:ekr.20160111190632.1: *3* ccc.makeStubFiles 

1157 @cmd('make-stub-files') 

1158 def make_stub_files(self, event): # pragma: no cover 

1159 """ 

1160 Make stub files for all nearby @<file> nodes. 

1161 Take configuration settings from @x stub-y nodes. 

1162 """ 

1163 #@+others 

1164 #@+node:ekr.20160213070235.1: *4* class MakeStubFileAdapter 

1165 class MakeStubFileAdapter: # pragma: no cover 

1166 """ 

1167 A class that adapts leo/external/make_stub_files.py to Leo. 

1168 

1169 Settings are taken from Leo settings nodes, not a .cfg file. 

1170 """ 

1171 #@+others 

1172 #@+node:ekr.20160213070235.2: *5* msf.ctor & helpers 

1173 def __init__(self, c): 

1174 """MakeStubFile.ctor. From StandAloneMakeStubFile.ctor.""" 

1175 self.c = c 

1176 self.msf = msf = g.import_module('make_stub_files') 

1177 x = msf.StandAloneMakeStubFile() 

1178 # x is used *only* to init ivars. 

1179 # Ivars set on the command line... 

1180 self.config_fn = None 

1181 self.enable_unit_tests = False 

1182 self.files = [] # May also be set in the config file. 

1183 self.output_directory = self.finalize( 

1184 c.config.getString('stub-output-directory') or '.') 

1185 self.output_fn = None 

1186 self.overwrite = c.config.getBool('stub-overwrite', default=False) 

1187 self.trace_matches = c.config.getBool( 

1188 'stub-trace-matches', default=False) 

1189 self.trace_patterns = c.config.getBool( 

1190 'stub-trace-patterns', default=False) 

1191 self.trace_reduce = c.config.getBool('stub-trace-reduce', default=False) 

1192 self.trace_visitors = c.config.getBool( 

1193 'stub-trace-visitors', default=False) 

1194 self.update_flag = c.config.getBool('stub-update', default=False) 

1195 self.verbose = c.config.getBool('stub-verbose', default=False) 

1196 self.warn = c.config.getBool('stub-warn', default=False) 

1197 # Pattern lists & dicts, set by config sections... 

1198 self.patterns_dict = {} 

1199 self.names_dict = {} 

1200 self.def_patterns = self.scan_patterns('stub-def-name-patterns') 

1201 self.general_patterns = self.scan_patterns('stub-general-patterns') 

1202 self.prefix_lines = self.scan('stub-prefix-lines') 

1203 self.regex_patterns = self.scan_patterns('stub-regex-patterns') 

1204 # Complete the dicts. 

1205 x.make_patterns_dict() 

1206 self.patterns_dict = x.patterns_dict 

1207 self.op_name_dict = x.op_name_dict = x.make_op_name_dict() 

1208 # Copy the ivars. 

1209 x.def_patterns = self.def_patterns 

1210 x.general_patterns = self.general_patterns 

1211 x.regex_patterns = self.regex_patterns 

1212 x.prefix_lines = self.prefix_lines 

1213 #@+node:ekr.20160213070235.3: *6* msf.scan 

1214 def scan(self, kind): 

1215 """Return a list of *all* lines from an @data node, including comments.""" 

1216 c = self.c 

1217 aList = c.config.getData(kind, strip_comments=False, strip_data=False) 

1218 if not aList: 

1219 g.trace(f"warning: no @data {kind} node") 

1220 return aList 

1221 #@+node:ekr.20160213070235.4: *6* msf.scan_d 

1222 def scan_d(self, kind): 

1223 """Return a dict created from an @data node of the given kind.""" 

1224 c = self.c 

1225 aList = c.config.getData(kind, strip_comments=True, strip_data=True) 

1226 d = {} 

1227 if aList is None: 

1228 g.trace(f"warning: no @data {kind} node") 

1229 for s in aList or []: 

1230 name, value = s.split(':', 1) 

1231 d[name.strip()] = value.strip() 

1232 return d 

1233 #@+node:ekr.20160213070235.5: *6* msf.scan_patterns 

1234 def scan_patterns(self, kind): 

1235 """Parse the config section into a list of patterns, preserving order.""" 

1236 d = self.scan_d(kind) 

1237 aList = [] 

1238 seen = set() 

1239 for key in d: 

1240 value = d.get(key) 

1241 if key in seen: 

1242 g.trace('duplicate key', key) 

1243 else: 

1244 seen.add(key) 

1245 aList.append(self.msf.Pattern(key, value)) 

1246 return aList 

1247 #@+node:ekr.20160213070235.6: *5* msf.finalize 

1248 def finalize(self, fn): 

1249 """Finalize and regularize a filename.""" 

1250 return g.os_path_normpath(g.os_path_abspath(g.os_path_expanduser(fn))) 

1251 #@+node:ekr.20160213070235.7: *5* msf.make_stub_file 

1252 def make_stub_file(self, p): 

1253 """Make a stub file in ~/stubs for the @<file> node at p.""" 

1254 import ast 

1255 assert p.isAnyAtFileNode() 

1256 c = self.c 

1257 fn = p.anyAtFileNodeName() 

1258 if not fn.endswith('.py'): 

1259 g.es_print('not a python file', fn) 

1260 return 

1261 abs_fn = g.fullPath(c, p) 

1262 if not g.os_path_exists(abs_fn): 

1263 g.es_print('not found', abs_fn) 

1264 return 

1265 if g.os_path_exists(self.output_directory): 

1266 base_fn = g.os_path_basename(fn) 

1267 out_fn = g.os_path_finalize_join(self.output_directory, base_fn) 

1268 else: 

1269 g.es_print('not found', self.output_directory) 

1270 return 

1271 out_fn = out_fn[:-3] + '.pyi' 

1272 out_fn = g.os_path_normpath(out_fn) 

1273 self.output_fn = out_fn 

1274 # compatibility with stand-alone script 

1275 s = open(abs_fn).read() 

1276 node = ast.parse(s, filename=fn, mode='exec') 

1277 # Make the traverser *after* creating output_fn and output_directory ivars. 

1278 x = self.msf.StubTraverser(controller=self) 

1279 x.output_fn = self.output_fn 

1280 x.output_directory = self.output_directory 

1281 x.trace_matches = self.trace_matches 

1282 x.trace_patterns = self.trace_patterns 

1283 x.trace_reduce = self.trace_reduce 

1284 x.trace_visitors = self.trace_visitors 

1285 x.run(node) 

1286 #@+node:ekr.20160213070235.8: *5* msf.run 

1287 def run(self, p): 

1288 """Make stub files for all files in p's tree.""" 

1289 if p.isAnyAtFileNode(): 

1290 self.make_stub_file(p) 

1291 return 

1292 # First, look down tree. 

1293 after, p2 = p.nodeAfterTree(), p.firstChild() 

1294 found = False 

1295 while p2 and p != after: 

1296 if p2.isAnyAtFileNode(): 

1297 self.make_stub_file(p2) 

1298 p2.moveToNext() 

1299 found = True 

1300 else: 

1301 p2.moveToThreadNext() 

1302 if not found: 

1303 # Look up the tree. 

1304 for p2 in p.parents(): 

1305 if p2.isAnyAtFileNode(): 

1306 self.make_stub_file(p2) 

1307 break 

1308 else: 

1309 g.es('no files found in tree:', p.h) 

1310 #@-others 

1311 #@-others 

1312 MakeStubFileAdapter(self.c).run(self.c.p) 

1313 #@+node:ekr.20160316091923.1: *3* ccc.python-to-coffeescript 

1314 @cmd('python-to-coffeescript') 

1315 def python2coffeescript(self, event): # pragma: no cover 

1316 """ 

1317 Converts Python text to CoffeeScript text. The conversion is not 

1318 perfect, but it eliminates a lot of tedious text manipulation. 

1319 """ 

1320 #@+others 

1321 #@+node:ekr.20160316092837.1: *4* class Python_To_Coffeescript_Adapter 

1322 class Python_To_Coffeescript_Adapter: # pragma: no cover 

1323 """An interface class between Leo and leo/external/py2cs.py.""" 

1324 #@+others 

1325 #@+node:ekr.20160316112717.1: *5* py2cs.ctor 

1326 def __init__(self, c): 

1327 """Ctor for Python_To_Coffeescript_Adapter class.""" 

1328 self.c = c 

1329 self.files = [] 

1330 self.output_directory = self.finalize( 

1331 c.config.getString('py2cs-output-directory')) 

1332 # self.output_fn = None 

1333 self.overwrite = c.config.getBool('py2cs-overwrite', default=False) 

1334 # Connect to the external module. 

1335 self.py2cs = g.import_module('leo.external.py2cs') 

1336 #@+node:ekr.20160316093019.1: *5* py2cs.main 

1337 def main(self): 

1338 """Main line for Python_To_CoffeeScript class.""" 

1339 if self.py2cs: 

1340 self.run() 

1341 else: 

1342 g.es_print('can not load py2cs.py') 

1343 #@+node:ekr.20160316094011.7: *5* py2cs.finalize 

1344 def finalize(self, fn): 

1345 """Finalize and regularize a filename.""" 

1346 return g.os_path_normpath(g.os_path_abspath(g.os_path_expanduser(fn))) 

1347 #@+node:ekr.20160316094011.8: *5* py2cs.to_coffeescript 

1348 def to_coffeescript(self, p): 

1349 """Convert the @<file> node at p to a .coffee file.""" 

1350 assert p.isAnyAtFileNode() 

1351 c = self.c 

1352 fn = p.anyAtFileNodeName() 

1353 if not fn.endswith('.py'): 

1354 g.es_print('not a python file', fn) 

1355 return 

1356 abs_fn = g.fullPath(c, p) 

1357 if not g.os_path_exists(abs_fn): 

1358 g.es_print('not found', abs_fn) 

1359 return 

1360 if g.os_path_exists(self.output_directory): 

1361 base_fn = g.os_path_basename(fn) 

1362 out_fn = g.os_path_finalize_join(self.output_directory, base_fn) 

1363 else: 

1364 g.es_print('not found', self.output_directory) 

1365 return 

1366 out_fn = out_fn[:-3] + '.coffee' 

1367 out_fn = g.os_path_normpath(out_fn) 

1368 s = open(abs_fn).read() 

1369 # s = self.strip_sentinels(s) 

1370 if 0: 

1371 for z in g.splitLines(s)[:20]: 

1372 print(z.rstrip()) 

1373 x = self.py2cs.MakeCoffeeScriptController() 

1374 # copy ivars and run. 

1375 x.enable_unit_tests = False 

1376 x.files = [abs_fn,] 

1377 x.output_directory = self.output_directory 

1378 x.overwrite = self.overwrite 

1379 x.make_coffeescript_file(abs_fn, s=s) 

1380 #@+node:ekr.20160316094011.9: *5* py2cs.run 

1381 def run(self): 

1382 """Create .coffee files for all @<file> nodes in p's tree.""" 

1383 p = self.c.p 

1384 if p.isAnyAtFileNode(): 

1385 self.to_coffeescript(p) 

1386 return 

1387 # First, look down tree. 

1388 after, p2 = p.nodeAfterTree(), p.firstChild() 

1389 found = False 

1390 while p2 and p != after: 

1391 if p2.isAnyAtFileNode(): 

1392 self.to_coffeescript(p2) 

1393 p2.moveToNext() 

1394 found = True 

1395 else: 

1396 p2.moveToThreadNext() 

1397 if not found: 

1398 # Look up the tree. 

1399 for p2 in p.parents(): 

1400 if p2.isAnyAtFileNode(): 

1401 self.to_coffeescript(p2) 

1402 return 

1403 g.es_print('no files found in tree:', p.h) 

1404 #@+node:ekr.20160316141812.1: *5* py2cs.strip_sentinels 

1405 def strip_sentinels(self, s): 

1406 """ 

1407 Strip s of all sentinel lines. 

1408 This may be dubious because it destroys outline structure. 

1409 """ 

1410 delims = ['#', None, None] 

1411 return ''.join( 

1412 [z for z in g.splitLines(s) if not g.is_sentinel(z, delims)]) 

1413 #@-others 

1414 #@-others 

1415 c = self.c 

1416 Python_To_Coffeescript_Adapter(c).main() 

1417 c.bodyWantsFocus() 

1418 #@+node:ekr.20211013080132.1: *3* ccc.python-to-typescript 

1419 @cmd('python-to-typescript') 

1420 def pythonToTypescriptCommand(self, event): # pragma: no cover 

1421 """ 

1422 The python-to-typescript command converts Python text to TypeScript text. 

1423 The conversion is not perfect, but it eliminates a lot of tedious text 

1424 manipulation. 

1425  

1426 To use, select any @<file> node and execute python-to-typescript. The 

1427 command creates (safe!) results in the last top-level node of the 

1428 outline. 

1429 

1430 The command attempts no type analysis. It uses "void" as the return type of 

1431 all functions and methods. However, the script will annotate 

1432 function/method arguments: 

1433 

1434 @data python-to-typescript-types in leoSettings.leo contains a list of 

1435 key/value pairs. Keys are argument names (as used in Leo); values are 

1436 typescript type names. 

1437 """ 

1438 c = self.c 

1439 self.PythonToTypescript(c).convert(c.p) 

1440 self.c.bodyWantsFocus() 

1441 #@+node:ekr.20211013080132.2: *4* class PythonToTypescript 

1442 #@@nobeautify 

1443 class PythonToTypescript: # pragma: no cover 

1444 

1445 # The handlers are clear as they are. 

1446 # pylint: disable=no-else-return 

1447 

1448 # Keys are argument names. Values are typescript types. 

1449 # Typescript can infer types of initialized kwargs. 

1450 types_d: Dict[str, str] = {} 

1451 

1452 #@+others 

1453 #@+node:ekr.20211020162251.1: *5* py2ts.ctor 

1454 def __init__(self, c, alias=None): 

1455 self.c = c 

1456 self.alias = alias # For scripts. An alias for 'self'. 

1457 data = c.config.getData('python-to-typescript-types') or [] 

1458 for line in data: 

1459 try: 

1460 key, value = line.split(',') 

1461 self.types_d [key.strip()] = value.strip() 

1462 except Exception: 

1463 g.es_print('ignoring bad key/value pair in @data python-to-typescript-types') 

1464 g.es_print(repr(line)) 

1465 #@+node:ekr.20211013081549.1: *5* py2ts.convert 

1466 def convert(self, p): 

1467 """ 

1468 The main line. 

1469  

1470 Convert p and all descendants as a child of a new last top-level node. 

1471 """ 

1472 c = self.c 

1473 # Create the parent node. It will be deleted. 

1474 parent = c.lastTopLevel().insertAfter() 

1475 # Convert p and all its descendants. 

1476 try: 

1477 self.convert_node(p, parent) 

1478 # Promote the translated node. 

1479 parent.promote() 

1480 parent.doDelete() 

1481 p = c.lastTopLevel() 

1482 p.h = p.h.replace('.py', '.ts').replace('@','@@') 

1483 c.redraw(p) 

1484 c.expandAllSubheads(p) 

1485 c.treeWantsFocusNow() 

1486 except Exception: 

1487 g.es_exception() 

1488 #@+node:ekr.20211013101327.1: *5* py2ts.convert_node 

1489 def convert_node(self, p, parent): 

1490 # Create a copy of p as the last child of parent. 

1491 target = parent.insertAsLastChild() 

1492 target.h = p.h # The caller will rename this node. 

1493 # Convert p.b into child.b 

1494 self.convert_body(p, target) 

1495 # Recursively create all descendants. 

1496 for child in p.children(): 

1497 self.convert_node(child, target) 

1498 #@+node:ekr.20211013102209.1: *5* py2ts.convert_body, handlers &helpers 

1499 patterns: Optional[Tuple] = None 

1500 

1501 def convert_body(self, p, target): 

1502 """ 

1503 Convert p.b into target.b. 

1504  

1505 This is the heart of the algorithm. 

1506 """ 

1507 # Calculate this table only once. 

1508 if not self.patterns: 

1509 self.patterns = ( 

1510 # Head: order matters. 

1511 (self.comment_pat, self.do_comment), 

1512 (self.docstring_pat, self.do_docstring), 

1513 (self.section_ref_pat, self.do_section_ref), 

1514 # Middle: order doesn't matter. 

1515 (self.class_pat, self.do_class), 

1516 (self.def_pat, self.do_def), 

1517 (self.elif_pat, self.do_elif), 

1518 (self.else_pat, self.do_else), 

1519 (self.except_pat, self.do_except), 

1520 (self.finally_pat, self.do_finally), 

1521 (self.for_pat, self.do_for), 

1522 (self.if_pat, self.do_if), 

1523 (self.import_pat, self.do_import), 

1524 (self.try_pat, self.do_try), 

1525 (self.while_pat, self.do_while), 

1526 (self.with_pat, self.do_with), 

1527 # Tail: order matters. 

1528 (self.trailing_comment_pat, self.do_trailing_comment) 

1529 ) 

1530 # The loop may change lines, but each line is scanned only once. 

1531 i, lines = 0, g.splitLines(self.pre_pass(p.b)) 

1532 old_lines = lines[:] 

1533 while i < len(lines): 

1534 progress = i 

1535 line = lines[i] 

1536 for (pattern, handler) in self.patterns: 

1537 m = pattern.match(line) 

1538 if m: 

1539 i = handler(i, lines, m, p) # May change lines. 

1540 break 

1541 else: 

1542 self.do_operators(i, lines, p) 

1543 self.do_semicolon(i, lines, p) 

1544 i += 1 

1545 assert progress < i 

1546 if False and g.unitTesting and lines != old_lines: 

1547 print(f"\nchanged {p.h}:\n") 

1548 for z in lines: 

1549 print(z.rstrip()) 

1550 # Run the post-pass 

1551 target.b = self.post_pass(lines) 

1552 # Munge target.h. 

1553 target.h = target.h.replace('__init__', 'constructor') 

1554 #@+node:ekr.20211018154815.1: *6* handlers 

1555 #@+node:ekr.20211014023141.1: *7* py2ts.do_class 

1556 class_pat = re.compile(r'^([ \t]*)class(.*):(.*)\n') 

1557 

1558 def do_class(self, i, lines, m, p): 

1559 

1560 j = self.find_indented_block(i, lines, m, p) 

1561 lws, base, tail = m.group(1), m.group(2).strip(), m.group(3).strip() 

1562 base_s = f" {base} " if base else '' 

1563 tail_s = f" // {tail}" if tail else '' 

1564 lines[i] = f"{lws}class{base_s}{{{tail_s}\n" 

1565 lines.insert(j, f"{lws}}}\n") 

1566 return i + 1 

1567 #@+node:ekr.20211013165615.1: *7* py2ts.do_comment 

1568 comment_pat = re.compile(r'^([ \t]*)#(.*)\n') 

1569 

1570 def do_comment(self, i, lines, m, p): 

1571 """Handle a stand-alone comment line.""" 

1572 lws, comment = m.group(1), m.group(2).strip() 

1573 if comment: 

1574 lines[i] = f"{lws}// {comment}\n" 

1575 else: 

1576 lines[i] = '\n' # Write blank line for an empty comment. 

1577 return i + 1 
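
# Illustrative sketch (not part of the original source):
#
#   '    # update the cache'   becomes   '    // update the cache'
#   '#'                        becomes   ''  (a blank line replaces an empty comment)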

1578 #@+node:ekr.20211013130041.1: *7* py2ts.do_def & helper 

1579 def_pat = re.compile(r'^([ \t]*)def[ \t]+([\w_]+)\s*\((.*)\):(.*)\n') 

1580 this_pat = re.compile(r'^.*?\bthis\b') # 'self' has already become 'this'. 

1581 

1582 def do_def(self, i, lines, m, p): 

1583 

1584 j = self.find_indented_block(i, lines, m, p) 

1585 lws, name, args, tail = m.group(1), m.group(2), m.group(3).strip(), m.group(4).strip() 

1586 args = self.do_args(args) 

1587 if name == '__init__': 

1588 name = 'constructor' 

1589 tail_s = f" // {tail}" if tail else '' 

1590 # Use void as a placeholder type. 

1591 type_s = ' ' if name == 'constructor' else ': void ' 

1592 function_s = ' ' if self.this_pat.match(lines[i]) else ' function ' 

1593 lines[i] = f"{lws}public{function_s}{name}({args}){type_s}{{{tail_s}\n" 

1594 lines.insert(j, f"{lws}}}\n") 

1595 return i + 1 

1596 #@+node:ekr.20211014031722.1: *8* py2ts.do_args 

1597 def do_args(self, args): 

1598 """Add type annotations and remove the 'self' argument.""" 

1599 result = [] 

1600 for arg in (z.strip() for z in args.split(',')): 

1601 # Omit the self arg. 

1602 if arg != 'this': # Already converted. 

1603 val = self.types_d.get(arg) 

1604 result.append(f"{arg}: {val}" if val else arg) 

1605 return ', '.join(result) 
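
# Illustrative sketch (not part of the original source), assuming @data
# python-to-typescript-types maps 'c' to 'Commands' and 's' to 'string':
#
#   self.do_args('this, c, s, flag')  # => 'c: Commands, s: string, flag'
#
# 'this' (the already-converted 'self') is dropped; unknown names are left untyped.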

1606 #@+node:ekr.20211013165952.1: *7* py2ts.do_docstring 

1607 docstring_pat = re.compile(r'^([ \t]*)r?("""|\'\'\')(.*)\n') 

1608 

1609 def do_docstring(self, i, lines, m, p): 

1610 """ 

1611 Convert a python docstring. 

1612  

1613 Always use the full multi-line typescript format, even for single-line 

1614 python docstrings. 

1615 """ 

1616 lws, delim, docstring = m.group(1), m.group(2), m.group(3).strip() 

1617 tail = docstring.replace(delim, '').strip() 

1618 lines[i] = f"{lws}/**\n" 

1619 if tail: 

1620 lines.insert(i + 1, f"{lws} * {tail}\n") 

1621 i += 1 

1622 if delim in docstring: 

1623 lines.insert(i + 1, f"{lws} */\n") 

1624 return i + 2 

1625 i += 1 

1626 while i < len(lines): 

1627 line = lines[i] 

1628 # Buglet: ignores whatever might follow. 

1629 tail = line.replace(delim, '').strip() 

1630 # pylint: disable=no-else-return 

1631 if delim in line: 

1632 if tail: 

1633 lines[i] = f"{lws} * {tail}\n" 

1634 lines.insert(i + 1, f"{lws} */\n") 

1635 return i + 2 

1636 else: 

1637 lines[i] = f"{lws} */\n" 

1638 return i + 1 

1639 elif tail: 

1640 lines[i] = f"{lws} * {tail}\n" 

1641 else: 

1642 lines[i] = f"{lws} *\n" 

1643 i += 1 

1644 return i 
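# Illustrative example (not in the original source): a one-line docstring
#     """Return the sum."""
# becomes the multi-line TypeScript doc comment
#     /**
#      * Return the sum.
#      */
# Multi-line docstrings are converted line by line until the closing
# delimiter is seen, as in the loop above.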

1645 #@+node:ekr.20211014030113.1: *7* py2ts.do_except 

1646 except_pat = re.compile(r'^([ \t]*)except(.*):(.*)\n') 

1647 

1648 def do_except(self, i, lines, m, p): 

1649 

1650 j = self.find_indented_block(i, lines, m, p) 

1651 lws, error, tail = m.group(1), m.group(2).strip(), m.group(3).strip() 

1652 tail_s = f" // {tail}" if tail else '' 

1653 error_s = f" ({error}) " if error else '' 

1654 lines[i] = f"{lws}catch{error_s}{{{tail_s}\n" 

1655 lines.insert(j, f"{lws}}}\n") 

1656 return i + 1 

1657 #@+node:ekr.20211013141725.1: *7* py2ts.do_for 

1658 for1_s = r'^([ \t]*)for[ \t]+(.*):(.*)\n' # for (cond): 

1659 for2_s = r'^([ \t]*)for[ \t]*\((.*)\n' # for ( 

1660 

1661 for1_pat = re.compile(for1_s) 

1662 for2_pat = re.compile(for2_s) 

1663 for_pat = re.compile(fr"{for1_s}|{for2_s}") # Used by main loop. 

1664 

1665 def do_for(self, i, lines, m, p): 

1666 

1667 line = lines[i] 

1668 m1 = self.for1_pat.match(line) 

1669 m2 = self.for2_pat.match(line) 

1670 if m1: 

1671 j = self.find_indented_block(i, lines, m, p) 

1672 lws, cond, tail = m.group(1), m.group(2).strip(), m.group(3).strip() 

1673 cond_s = cond if cond.startswith('(') else f"({cond})" 

1674 tail_s = f" // {tail}" if tail else '' 

1675 lines[i] = f"{lws}for {cond_s} {{{tail_s}\n" 

1676 self.do_operators(i, lines, p) 

1677 lines.insert(j, f"{lws}}}\n") 

1678 return i + 1 

1679 else: 

1680 j = self.find_indented_block(i, lines, m2, p) 

1681 # Generate the 'for' line. 

1682 lws, tail = m2.group(1), m2.group(2).strip() 

1683 tail_s = f" // {tail}" if tail else '' 

1684 lines[i] = f"{lws}for ({tail_s}\n" 

1685 # Tell do_semicolons that lines[i:j] are not statements. 

1686 self.kill_semicolons(lines, i, j) 

1687 # Assume line[j] closes the paren. Insert '{' 

1688 lines[j] = lines[j].rstrip().replace(':', '') + ' {\n' 

1689 # Insert '}' 

1690 k = self.find_indented_block(j, lines, m2, p) 

1691 lines.insert(k, f"{lws}}}\n") 

1692 return i + 1 

1693 #@+node:ekr.20211017202104.1: *7* py2ts.do_import 

1694 import_s = r'^([ \t]*)import[ \t]+(.*)\n' 

1695 import_from_s = r'^([ \t]*)from[ \t]+(.*)[ \t]+import[ \t]+(.*)\n' 

1696 import_pat = re.compile(fr"{import_s}|{import_from_s}") # Used by main loop. 

1697 import1_pat = re.compile(import_s) 

1698 import2_pat = re.compile(import_from_s) 

1699 

1700 def do_import(self, i, lines, m, p): 

1701 

1702 line = lines[i] 

1703 m1 = self.import1_pat.match(line) 

1704 m2 = self.import2_pat.match(line) 

1705 # Comment out all imports. 

1706 if m1: 

1707 lws, import_list = m1.group(1), m1.group(2).strip() 

1708 lines[i] = f'{lws}// import "{import_list}"\n' 

1709 else: 

1710 lws, module, import_list = m2.group(1), m2.group(2).strip(), m2.group(3).strip() 

1711 lines[i] = f'{lws}// from "{module}" import {import_list}\n' 

1712 return i + 1 
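# Illustrative example (not in the original source): both import forms
# are simply commented out, e.g.
#     import re                         -->  // import "re"
#     from leo.core import leoGlobals   -->  // from "leo.core" import leoGlobals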

1713 #@+node:ekr.20211014022432.1: *7* py2ts.do_elif 

1714 elif1_s = r'^([ \t]*)elif[ \t]+(.*):(.*)\n' # elif (cond): 

1715 elif2_s = r'^([ \t]*)elif[ \t]*\((.*)\n' # elif ( 

1716 

1717 elif1_pat = re.compile(elif1_s) 

1718 elif2_pat = re.compile(elif2_s) 

1719 elif_pat = re.compile(fr"{elif1_s}|{elif2_s}") # Used by main loop. 

1720 

1721 def do_elif(self, i, lines, m, p): 

1722 

1723 line = lines[i] 

1724 m1 = self.elif1_pat.match(line) 

1725 m2 = self.elif2_pat.match(line) 

1726 if m1: 

1727 j = self.find_indented_block(i, lines, m, p) 

1728 lws, cond, tail = m.group(1), m.group(2).strip(), m.group(3).strip() 

1729 cond_s = cond if cond.startswith('(') else f"({cond})" 

1730 tail_s = f" // {tail}" if tail else '' 

1731 lines[i] = f"{lws}else if {cond_s} {{{tail_s}\n" 

1732 lines.insert(j, f"{lws}}}\n") 

1733 self.do_operators(i, lines, p) 

1734 return i + 1 

1735 else: 

1736 j = self.find_indented_block(i, lines, m2, p) 

1737 # Generate the 'else if' line. 

1738 lws, tail = m2.group(1), m2.group(2).strip() 

1739 tail_s = f" // {tail}" if tail else '' 

1740 lines[i] = f"{lws}else if ({tail_s}\n" 

1741 # Tell do_semicolons that lines[i:j] are not statements. 

1742 self.kill_semicolons(lines, i, j) 

1743 # Assume line[j] closes the paren. Insert '{' 

1744 lines[j] = lines[j].rstrip().replace(':', '') + ' {\n' 

1745 # Insert '}' 

1746 k = self.find_indented_block(j, lines, m2, p) 

1747 lines.insert(k, f"{lws}}}\n") 

1748 return i + 1 

1749 

1750 #@+node:ekr.20211014022445.1: *7* py2ts.do_else 

1751 else_pat = re.compile(r'^([ \t]*)else:(.*)\n') 

1752 

1753 def do_else(self, i, lines, m, p): 

1754 

1755 j = self.find_indented_block(i, lines, m, p) 

1756 lws, tail = m.group(1), m.group(2).strip() 

1757 tail_s = f" // {tail}" if tail else '' 

1758 lines[i] = f"{lws}else {{{tail_s}\n" 

1759 lines.insert(j, f"{lws}}}\n") 

1760 return i + 1 

1761 #@+node:ekr.20211014022453.1: *7* py2ts.do_finally 

1762 finally_pat = re.compile(r'^([ \t]*)finally:(.*)\n') 

1763 

1764 def do_finally(self, i, lines, m, p): 

1765 

1766 j = self.find_indented_block(i, lines, m, p) 

1767 lws, tail = m.group(1), m.group(2).strip() 

1768 tail_s = f" // {tail}" if tail else '' 

1769 lines[i] = f"{lws}finally {{{tail_s}\n" 

1770 lines.insert(j, f"{lws}}}\n") 

1771 return i + 1 

1772 #@+node:ekr.20211013131016.1: *7* py2ts.do_if 

1773 if1_s = r'^([ \t]*)if[ \t]+(.*):(.*)\n' # if (cond): 

1774 if2_s = r'^([ \t]*)if[ \t]*\((.*)\n' # if ( 

1775 

1776 if1_pat = re.compile(if1_s) 

1777 if2_pat = re.compile(if2_s) 

1778 if_pat = re.compile(fr"{if1_s}|{if2_s}") # Used by main loop. 

1779 

1780 def do_if(self, i, lines, m, p): 

1781 

1782 line = lines[i] 

1783 m1 = self.if1_pat.match(line) 

1784 m2 = self.if2_pat.match(line) 

1785 if m1: 

1786 j = self.find_indented_block(i, lines, m1, p) 

1787 lws, cond, tail = m.group(1), m.group(2).strip(), m.group(3).strip() 

1788 cond_s = cond if cond.startswith('(') else f"({cond})" 

1789 tail_s = f" // {tail}" if tail else '' 

1790 lines[i] = f"{lws}if {cond_s} {{{tail_s}\n" 

1791 self.do_operators(i, lines, p) 

1792 lines.insert(j, f"{lws}}}\n") 

1793 return i + 1 

1794 else: 

1795 j = self.find_indented_block(i, lines, m2, p) 

1796 # Generate the 'if' line. 

1797 lws, tail = m2.group(1), m2.group(2).strip() 

1798 tail_s = f" // {tail}" if tail else '' 

1799 lines[i] = f"{lws}if ({tail_s}\n" 

1800 # Tell do_semicolons that lines[i:j] are not statements. 

1801 self.kill_semicolons(lines, i, j) 

1802 # Assume line[j] closes the paren. Insert '{' 

1803 lines[j] = lines[j].rstrip().replace(':', '') + ' {\n' 

1804 # Insert '}' 

1805 k = self.find_indented_block(j, lines, m2, p) 

1806 lines.insert(k, f"{lws}}}\n") 

1807 return i + 1 

1808 #@+node:ekr.20211018125503.1: *7* py2ts.do_section_ref 

1809 section_ref_pat = re.compile(r"^([ \t]*)(\<\<.*?\>\>)\s*(.*)$") 

1810 

1811 def do_section_ref(self, i, lines, m, p): 

1812 # Handle trailing code. 

1813 lws, section_name, tail = m.group(1), m.group(2), m.group(3).strip() 

1814 if tail.startswith('#'): 

1815 lines[i] = f"{lws}{section_name} // {tail[1:]}\n" 

1816 return i + 1 

1817 #@+node:ekr.20211014022506.1: *7* py2ts.do_try 

1818 try_pat = re.compile(r'^([ \t]*)try:(.*)\n') 

1819 

1820 def do_try(self, i, lines, m, p): 

1821 

1822 j = self.find_indented_block(i, lines, m, p) 

1823 lws, tail = m.group(1), m.group(2).strip() 

1824 tail_s = f" // {tail}" if tail else '' 

1825 lines[i] = f"{lws}try {{{tail_s}\n" 

1826 lines.insert(j, f"{lws}}}\n") 

1827 return i + 1 

1828 #@+node:ekr.20211013141809.1: *7* py2ts.do_while 

1829 while1_s = r'^([ \t]*)while[ \t]+(.*):(.*)\n' # while (cond): 

1830 while2_s = r'^([ \t]*)while[ \t]*\((.*)\n' # while ( 

1831 

1832 while1_pat = re.compile(while1_s) 

1833 while2_pat = re.compile(while2_s) 

1834 while_pat = re.compile(fr"{while1_s}|{while2_s}") # Used by main loop. 

1835 

1836 def do_while(self, i, lines, m, p): 

1837 

1838 line = lines[i] 

1839 m1 = self.while1_pat.match(line) 

1840 m2 = self.while2_pat.match(line) 

1841 if m1: 

1842 j = self.find_indented_block(i, lines, m, p) 

1843 lws, cond, tail = m.group(1), m.group(2).strip(), m.group(3).strip() 

1844 cond_s = cond if cond.startswith('(') else f"({cond})" 

1845 tail_s = f" // {tail}" if tail else '' 

1846 lines[i] = f"{lws}while {cond_s} {{{tail_s}\n" 

1847 self.do_operators(i, lines, p) 

1848 lines.insert(j, f"{lws}}}\n") 

1849 return i + 1 

1850 else: 

1851 j = self.find_indented_block(i, lines, m2, p) 

1852 # Generate the 'while' line. 

1853 lws, tail = m2.group(1), m2.group(2).strip() 

1854 tail_s = f" // {tail}" if tail else '' 

1855 lines[i] = f"{lws}while ({tail_s}\n" 

1856 # Tell do_semicolons that lines[i:j] are not statements. 

1857 self.kill_semicolons(lines, i, j) 

1858 # Assume line[j] closes the paren. Insert '{' 

1859 lines[j] = lines[j].rstrip().replace(':', '') + ' {\n' 

1860 # Insert '}' 

1861 k = self.find_indented_block(j, lines, m2, p) 

1862 lines.insert(k, f"{lws}}}\n") 

1863 return i + 1 

1864 

1865 #@+node:ekr.20211014022554.1: *7* py2ts.do_with 

1866 with_pat = re.compile(r'^([ \t]*)with(.*):(.*)\n') 

1867 

1868 def do_with(self, i, lines, m, p): 

1869 

1870 j = self.find_indented_block(i, lines, m, p) 

1871 lws, clause, tail = m.group(1), m.group(2).strip(), m.group(3).strip() 

1872 tail_s = f" // {tail}" if tail else '' 

1873 clause_s = f" ({clause}) " if clause else '' 

1874 lines[i] = f"{lws}with{clause_s}{{{tail_s}\n" 

1875 lines.insert(j, f"{lws}}}\n") 

1876 return i + 1 

1877 #@+node:ekr.20211013172540.1: *7* py2ts.do_trailing_comment 

1878 trailing_comment_pat = re.compile(r'^([ \t]*)(.*)#(.*)\n') 

1879 

1880 def do_trailing_comment(self, i, lines, m, p): 

1881 """ 

1882 Handle a trailing comment line. 

1883  

1884 All other patterns have already been scanned on the line. 

1885 """ 

1886 lws, statement, trailing_comment = m.group(1), m.group(2).rstrip(), m.group(3).strip() 

1887 statement_s = f"{statement};" if self.ends_statement(i, lines) else statement 

1888 lines[i] = f"{lws}{statement_s} // {trailing_comment}\n" 

1889 return i + 1 
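# Illustrative example (not in the original source):
#     count += 1  # bump the total
# becomes
#     count += 1; // bump the total
# The semicolon is added only when ends_statement says the line really
# ends a statement.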

1890 #@+node:ekr.20211022090919.1: *6* helpers 

1891 #@+node:ekr.20211017210122.1: *7* py2ts.do_operators 

1892 def do_operators(self, i, lines, p): 

1893 

1894 # Regex replacements. 

1895 table = ( 

1896 ('True', 'true'), 

1897 ('False', 'false'), 

1898 # ('None', 'null'), # Done in post-pass. 

1899 ('default', 'default_val'), 

1900 ('and', '&&'), 

1901 ('or', '||'), 

1902 ('is not', '!='), 

1903 ('is', '=='), 

1904 ('not', '!'), 

1905 ('assert', '// assert'), 

1906 ) 

1907 for a, b in table: 

1908 lines[i] = re.sub(fr"\b{a}\b", b, lines[i]) 
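# Illustrative example (not in the original source): the table above is
# applied in order, so a condition such as
#     x is not None and not done
# becomes
#     x != None && ! done
# ('None' itself is translated to 'null' later, in post_pass.)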

1909 

1910 #@+node:ekr.20211017134103.1: *7* py2ts.do_semicolon 

1911 def do_semicolon(self, i, lines, p): 

1912 """ 

1913 Insert a semicolon in lines[i] if appropriate.

1914  

1915 No other handler has matched, so we know that the line: 

1916 - Does not end in a comment. 

1917 - Is not part of a docstring. 

1918 """ 

1919 # Honor the flag inserted by kill_semicolons. 

1920 flag = self.kill_semicolons_flag 

1921 if lines[i].endswith(flag): 

1922 lines[i] = lines[i].replace(flag, '\n') 

1923 return 

1924 # For now, use a maximal policy. 

1925 if self.ends_statement(i, lines): 

1926 lines[i] = f"{lines[i].rstrip()};\n" 

1927 

1928 

1929 #@+node:ekr.20211017135603.1: *7* py2ts.ends_statement 

1930 def ends_statement(self, i, lines): 

1931 """ 

1932 Return True if lines[i] ends a statement. 

1933  

1934 If so, the line should end with a semicolon, 

1935 before any trailing comment, that is. 

1936 """ 

1937 # https://stackoverflow.com/questions/38823062/ 

1938 s = lines[i].strip() 

1939 next_line = lines[i + 1] if i + 1 < len(lines) else '' 

1940 # Return False for blank lines. 

1941 if not s: 

1942 return False 

1943 # Return False for Leo directives. 

1944 if s.startswith('@'): 

1945 return False 

1946 # Return False for section references. 

1947 i = s.find('<<') 

1948 j = s.find('>>') 

1949 if -1 < i < j: 

1950 return False 

1951 # Return False if this line ends in any of the following: 

1952 if s.endswith(('{', '(', '[', ':', '||', '&&', '!', ',', '`')): 

1953 return False 

1954 # Return False if the next line starts with '{', '(', '['. 

1955 if next_line.lstrip().startswith(('{', '(', '[', '&&', '||', '!')):

1956 return False 

1957 # Return False for '}' lines. 

1958 if s.startswith('}'): 

1959 return False 

1960 return True 
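# Illustrative examples (not in the original source):
#     ends_statement for 'x = 2'          --> True  (gets a semicolon)
#     ends_statement for 'if ('           --> False (ends with an open paren)
#     ends_statement for '<< imports >>'  --> False (section reference)
#     ends_statement for '}'              --> False (closing-brace line)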

1961 #@+node:ekr.20211013123001.1: *7* py2ts.find_indented_block 

1962 lws_pat = re.compile(r'^([ \t]*)') 

1963 

1964 def find_indented_block(self, i, lines, m, p): 

1965 """Return j, the index of the line *after* the indented block.""" 

1966 # Scan for the first non-empty line with the same or less indentation. 

1967 lws = m.group(1) 

1968 j = i + 1 

1969 while j < len(lines): 

1970 line = lines[j] 

1971 m2 = self.lws_pat.match(line) 

1972 lws2 = m2.group(1) 

1973 if line.strip() and len(lws2) <= len(lws): 

1974 # Don't add a blank line at the end of a block. 

1975 if j > 1 and not lines[j - 1].strip(): 

1976 j -= 1 

1977 break 

1978 j += 1 

1979 return j 
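# Illustrative note (not in the original source): for
#     if x:          <-- lines[i], indentation ''
#         do_a()
#         do_b()
#     done()         <-- first non-blank line at the same indentation
# find_indented_block returns the index of the 'done()' line, so the
# closing '}' is inserted just before it.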

1980 

1981 #@+node:ekr.20211020101415.1: *7* py2ts.kill_semicolons 

1982 kill_semicolons_flag = ' // **kill-semicolon**\n' # Must end with a newline. 

1983 

1984 def kill_semicolons(self, lines, i, j): 

1985 """ 

1986 Tell later calls to do_semicolon that lines[i : j] should *not* end with a semicolon. 

1987 """ 

1988 for n in range(i, j): 

1989 lines[n] = lines[n].rstrip() + self.kill_semicolons_flag 
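# Illustrative note (not in the original source): kill_semicolons tags
# the continuation lines of a multi-line 'if (' / 'for (' / 'while ('
# header with kill_semicolons_flag.  do_semicolon then strips the flag
# instead of appending ';', and post_pass removes any flags that remain.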

1990 #@+node:ekr.20211016214742.1: *7* py2ts.move_docstrings 

1991 class_or_def_pat = re.compile(r'^(\s*)(public|class)\s+([\w_]+)') 

1992 

1993 def move_docstrings(self, lines): 

1994 """Move docstrings before the preceding class or def line.""" 

1995 i = 0 

1996 while i < len(lines): 

1997 m = self.class_or_def_pat.match(lines[i]) 

1998 i += 1 

1999 if not m: 

2000 continue 

2001 # Set j to the start of the docstring. 

2002 j = i 

2003 while j < len(lines): 

2004 if lines[j].strip(): 

2005 break 

2006 j += 1 

2007 if j >= len(lines): 

2008 continue 

2009 if not lines[j].strip().startswith('/**'): 

2010 continue 

2011 # Set k to the end of the docstring. 

2012 k = j 

2013 while k < len(lines) and '*/' not in lines[k]: 

2014 k += 1 

2015 if k >= len(lines): 

2016 g.printObj(lines[i-1:len(lines)-1], tag='OOPS') 

2017 continue 

2018 # Remove 4 blanks from the docstrings. 

2019 for n in range(j, k + 1): 

2020 if lines[n].startswith(' ' * 4): 

2021 lines[n] = lines[n][4:] 

2022 # Rearrange the lines. 

2023 lines[i-1 : k + 1] = lines[j : k + 1] + [lines[i-1]] 

2024 i = k + 1 

2025 ### return lines 
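# Illustrative example (not in the original source): after do_def and
# do_docstring have run, move_docstrings turns
#     public foo(): void {
#         /**
#          * Docstring.
#          */
# into
#     /**
#      * Docstring.
#      */
#     public foo(): void {
# removing four leading blanks from each docstring line.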

2026 #@+node:ekr.20211016200908.1: *7* py2ts.post_pass & helpers 

2027 def post_pass(self, lines): 

2028 

2029 # Munge lines in place 

2030 self.move_docstrings(lines) 

2031 self.do_f_strings(lines) 

2032 self.do_ternary(lines) 

2033 self.do_assignment(lines) # Do this last, so it doesn't add 'const' to inserted comments. 

2034 s = (''.join(lines) 

2035 .replace('@language python', '@language typescript') 

2036 .replace(self.kill_semicolons_flag, '\n') 

2037 ) 

2038 return re.sub(r'\bNone\b', 'null', s) 

2039 

2040 

2041 #@+node:ekr.20211021061023.1: *8* py2ts.do_assignment 

2042 assignment_pat = re.compile(r'^([ \t]*)(.*?)\s+=\s+(.*)$') # Require whitespace around the '=' 

2043 

2044 def do_assignment(self, lines): 

2045 """Add const to all non-tuple assignments.""" 

2046 # Do this late so that we can test for the ending semicolon. 

2047 

2048 # Suppression table. 

2049 # Missing elements are likely to cause this method to generate '= ='. 

2050 table = ( 

2051 ',', # Tuple assignment or multi-line argument lists.

2052 '*', # A converted docstring. 

2053 '`', # f-string. 

2054 '//', # Comment. 

2055 '=', # Condition. 

2056 # Keywords that might be followed by '=' 

2057 'class', 'def', 'elif', 'for', 'if', 'print', 'public', 'return', 'with', 'while', 

2058 ) 

2059 for i, s in enumerate(lines): 

2060 m = self.assignment_pat.match(s) 

2061 if m: 

2062 lws, lhs, rhs = m.group(1), m.group(2), m.group(3).rstrip() 

2063 if not any(z in lhs for z in table): 

2064 lines[i] = f"{lws}const {lhs} = {rhs}\n" 
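# Illustrative example (not in the original source):
#     x = 2;          -->  const x = 2;
#     a, b = 1, 2;    unchanged (tuple assignment: ',' is in the table)
# The table above suppresses 'const' wherever it could corrupt the line.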

2065 #@+node:ekr.20211020185016.1: *8* py2ts.do_f_strings 

2066 f_string_pat = re.compile(r'([ \t]*)(.*?)f"(.*?)"(.*)$') 

2067 

2068 def do_f_strings(self, lines): 

2069 

2070 i = 0 

2071 while i < len(lines): 

2072 progress = i 

2073 s = lines[i] 

2074 m = self.f_string_pat.match(s) 

2075 if not m: 

2076 i += 1 

2077 continue 

2078 lws, head, string, tail = m.group(1), m.group(2), m.group(3), m.group(4).rstrip() 

2079 string_s = ( 

2080 string.replace('{', '${') # Add the '$' 

2081 .replace('! ', 'not ') # Undo erroneous replacement. 

2082 ) 

2083 # Remove format strings. Not perfect, but seemingly good enough. 

2084 string_s = re.sub(r'\:[0-9]\.+[0-9]+[frs]', '', string_s) 

2085 string_s = re.sub(r'\![frs]', '', string_s) 

2086 # A hack. If the fstring is on a line by itself, remove a trailing ';' 

2087 if not head.strip() and tail.endswith(';'): 

2088 tail = tail[:-1].strip() 

2089 if 1: # Just replace the line. 

2090 lines[i] = f"{lws}{head}`{string_s}`{tail.rstrip()}\n" 

2091 i += 1 

2092 else: 

2093 # These comments quickly become annoying. 

2094 # Add the original line as a comment as a check. 

2095 lines[i] = f"{lws}// {s.strip()}\n" 

2096 # Add the replacement line. 

2097 lines.insert(i + 1, f"{lws}{head}`{string_s}`{tail.rstrip()}\n") 

2098 i += 2 

2099 assert i > progress 
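# Illustrative example (not in the original source): an f-string such as
#     s = f"{name}: {n}";
# becomes a template literal:
#     s = `${name}: ${n}`;
# (do_assignment later prefixes 'const' to this line.)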

2100 #@+node:ekr.20211021051033.1: *8* py2ts.do_ternary 

2101 ternary_pat1 = re.compile(r'^([ \t]*)(.*?)\s*=\s*(.*?) if (.*?) else (.*);$') # assignment 

2102 ternary_pat2 = re.compile(r'^([ \t]*)return\s+(.*?) if (.*?) else (.*);$') # return statement 

2103 

2104 def do_ternary(self, lines): 

2105 

2106 i = 0 

2107 while i < len(lines): 

2108 progress = i 

2109 s = lines[i] 

2110 m1 = self.ternary_pat1.match(s) 

2111 m2 = self.ternary_pat2.match(s) 

2112 if m1: 

2113 lws, target, a, cond, b = m1.group(1), m1.group(2), m1.group(3), m1.group(4), m1.group(5) 

2114 lines[i] = f"{lws}// {s.strip()}\n" 

2115 lines.insert(i + 1, f"{lws}{target} = {cond} ? {a} : {b};\n") 

2116 i += 2 

2117 elif m2: 

2118 lws, a, cond, b = m2.group(1), m2.group(2), m2.group(3), m2.group(4) 

2119 lines[i] = f"{lws}// {s.strip()}\n" 

2120 lines.insert(i + 1, f"{lws}return {cond} ? {a} : {b};\n") 

2121 i += 2 

2122 else: 

2123 i += 1 

2124 assert progress < i 
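# Illustrative example (not in the original source):
#     val = a if flag else b;
# is commented out and replaced by
#     // val = a if flag else b;
#     val = flag ? a : b;
# The return form is handled the same way by ternary_pat2.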

2125 #@+node:ekr.20211017044939.1: *7* py2ts.pre_pass 

2126 def pre_pass(self, s): 

2127 

2128 # Remove the python encoding lines. 

2129 s = s.replace('@first # -*- coding: utf-8 -*-\n', '') 

2130 

2131 # Replace 'self' by 'this' *everywhere*. 

2132 s = re.sub(r'\bself\b', 'this', s) 

2133 

2134 # Comment out @cmd decorators. 

2135 s = re.sub(r"^@cmd(.*?)$", r'// @cmd\1\n', s, flags=re.MULTILINE) 

2136 

2137 # Replace the alias for 'self' by 'this' *only* in specific contexts.

2138 # Do *not* replace the alias everywhere: that could do great harm. 

2139 if self.alias: 

2140 s = re.sub(fr"\b{self.alias}\.", 'this.', s) 

2141 # Remove lines like `at = self`. 

2142 s = re.sub(fr"^\s*{self.alias}\s*=\s*this\s*\n", '', s, flags=re.MULTILINE) 

2143 # Remove lines like `at, c = self, self.c`. 

2144 s = re.sub( 

2145 fr"^(\s*){self.alias}\s*,\s*c\s*=\s*this,\s*this.c\n", 

2146 r'\1c = this.c\n', # do_assignment adds const. 

2147 s, 

2148 flags=re.MULTILINE) 

2149 # Remove lines like `at, p = self, self.p`. 

2150 s = re.sub(fr"^(\s*){self.alias}\s*,\s*p\s*=\s*this,\s*this.p\n", 

2151 r'\1p = this.p\n', # do_assignment adds const. 

2152 s, 

2153 flags=re.MULTILINE) 

2154 # Do this last. 

2155 s = re.sub(fr"\b{self.alias},", 'this,', s) 

2156 return s 
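# Illustrative note (not in the original source): assuming a node whose
# class uses 'at' as its alias for self (self.alias == 'at'), pre_pass
# rewrites 'self.x' to 'this.x', drops lines such as 'at = self', and
# rewrites 'at.foo()' to 'this.foo()' before the line-by-line handlers run.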

2157 #@-others 

2158 #@+node:ekr.20160316091843.2: *3* ccc.typescript-to-py 

2159 @cmd('typescript-to-py') 

2160 def tsToPy(self, event): # pragma: no cover 

2161 """ 

2162 The typescript-to-python command converts typescript text to python 

2163 text. The conversion is not perfect, but it eliminates a lot of tedious 

2164 text manipulation. 

2165 """ 

2166 #@+others 

2167 #@+node:ekr.20150514063305.176: *4* class TS_To_Python (To_Python) 

2168 class TS_To_Python(To_Python): # pragma: no cover 

2169 #@+others 

2170 #@+node:ekr.20150514063305.177: *5* ctor (TS_To_Python) 

2171 def __init__(self, c): 

2172 """Ctor for TS_To_Python class.""" 

2173 super().__init__(c) 

2174 self.class_name = '' 

2175 # The class name for the present function. Used to modify ivars. 

2176 #@+node:ekr.20150514063305.178: *5* convertCodeList (TS_To_Python) & helpers 

2177 def convertCodeList(self, aList): 

2178 r, sr = self.replace, self.safe_replace 

2179 # First... 

2180 r(aList, '\r', '') 

2181 self.mungeAllFunctions(aList) 

2182 self.mungeAllClasses(aList) 

2183 # Second... 

2184 sr(aList, ' -> ', '.') 

2185 sr(aList, '->', '.') 

2186 sr(aList, ' . ', '.') 

2187 # sr(aList, 'this.self', 'self') 

2188 sr(aList, '{', '') 

2189 sr(aList, '}', '') 

2190 sr(aList, 'else if', 'elif') 

2191 sr(aList, 'else', 'else:') 

2192 sr(aList, '&&', ' and ') 

2193 sr(aList, '||', ' or ') 

2194 sr(aList, 'true', 'True') 

2195 sr(aList, 'false', 'False') 

2196 sr(aList, 'null', 'None') 

2197 sr(aList, 'this', 'self') 

2198 sr(aList, 'try', 'try:') 

2199 sr(aList, 'catch', 'except:') 

2200 sr(aList, 'constructor', '__init__') 

2201 sr(aList, 'new ', '') 

2202 # sr(aList, 'var ','') 

2203 # var usually indicates something weird, or an uninitialized var,

2204 # so it may be good to retain as a marker. 

2205 # Third... 

2206 self.handle_all_keywords(aList) 

2207 self.insert_not(aList) 

2208 self.removeSemicolonsAtEndOfLines(aList) 

2209 # after processing for keywords 

2210 self.comment_scope_ids(aList) 

2211 # Last... 

2212 self.removeBlankLines(aList) 

2213 self.removeExcessWs(aList) 

2214 # I usually don't like extra whitespace. YMMV. 

2215 sr(aList, ' and ', ' and ') 

2216 sr(aList, ' not ', ' not ') 

2217 sr(aList, ' or ', ' or ') 

2218 sr(aList, ' and ', ' and ') 

2219 sr(aList, ' not ', ' not ') 

2220 sr(aList, ' or ', ' or ') 

2221 sr(aList, ' :', ':') 

2222 sr(aList, ', ', ',') 

2223 sr(aList, ' ,', ',') 

2224 sr(aList, ' (', '(') 

2225 sr(aList, '( ', '(') 

2226 sr(aList, ' )', ')') 

2227 sr(aList, ') ', ')') 

2228 sr(aList, ' and(', ' and (') 

2229 sr(aList, ' not(', ' not (') 

2230 sr(aList, ' or(', ' or (') 

2231 sr(aList, ')and ', ') and ') 

2232 sr(aList, ')not ', ') not ') 

2233 sr(aList, ')or ', ') or ') 

2234 sr(aList, ')and(', ') and (') 

2235 sr(aList, ')not(', ') not (') 

2236 sr(aList, ')or(', ') or (') 

2237 sr(aList, '@language javascript', '@language python') 

2238 self.replaceComments(aList) # should follow all calls to safe_replace 

2239 self.removeTrailingWs(aList) 

2240 r(aList, '\t ', '\t') # happens when deleting declarations. 

2241 #@+node:ekr.20150514063305.179: *6* comment_scope_ids 

2242 def comment_scope_ids(self, aList): 

2243 """convert (public|private|export) aLine to aLine # (public|private|export)""" 

2244 scope_ids = ('public', 'private', 'export',) 

2245 i = 0 

2246 if any(self.match_word(aList, i, z) for z in scope_ids): 

2247 i = self.handle_scope_keyword(aList, i) 

2248 while i < len(aList): 

2249 progress = i 

2250 if self.is_string_or_comment(aList, i): 

2251 i = self.skip_string_or_comment(aList, i) 

2252 elif aList[i] == '\n': 

2253 i += 1 

2254 i = self.skip_ws(aList, i) 

2255 if any(self.match_word(aList, i, z) for z in scope_ids): 

2256 i = self.handle_scope_keyword(aList, i) 

2257 else: 

2258 i += 1 

2259 assert i > progress 

2260 # print "handAllKeywords2:", ''.join(aList) 

2261 #@+node:ekr.20150514063305.180: *7* handle_scope_keyword 

2262 def handle_scope_keyword(self, aList, i): 

2263 i1 = i 

2264 # pylint: disable=undefined-loop-variable 

2265 for word in ('public', 'private', 'export'): 

2266 if self.match_word(aList, i, word): 

2267 i += len(word) 

2268 break 

2269 else: 

2270 return None 

2271 # Skip any following spaces. 

2272 i2 = self.skip_ws(aList, i) 

2273 # Scan to the next newline: 

2274 i3 = self.skip_line(aList, i) 

2275 # Optional: move the word to a trailing comment. 

2276 comment: List[str] = list(f" # {word}") if False else [] 

2277 # Change the list in place. 

2278 aList[i1:i3] = aList[i2:i3] + comment 

2279 i = i1 + (i3 - i2) + len(comment) 

2280 return i 
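# Illustrative note (not in the original source): a leading scope keyword
# is simply removed, e.g.
#     export class Position:   -->   class Position:
# Flip the 'if False' above to keep the keyword as a trailing '# export'
# comment instead.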

2281 #@+node:ekr.20150514063305.181: *6* handle_all_keywords 

2282 def handle_all_keywords(self, aList): 

2283 """ 

2284 converts if ( x ) to if x: 

2285 converts while ( x ) to while x: 

2286 """ 

2287 statements = ('elif', 'for', 'if', 'while',) 

2288 i = 0 

2289 while i < len(aList): 

2290 if self.is_string_or_comment(aList, i): 

2291 i = self.skip_string_or_comment(aList, i) 

2292 elif any(self.match_word(aList, i, z) for z in statements): 

2293 i = self.handle_keyword(aList, i) 

2294 # elif ( 

2295 # self.match_word(aList,i,"if") or 

2296 # self.match_word(aList,i,"while") or 

2297 # self.match_word(aList,i,"for") or 

2298 # self.match_word(aList,i,"elif") 

2299 # ): 

2300 # i = self.handle_keyword(aList,i) 

2301 else: 

2302 i += 1 

2303 # print "handAllKeywords2:", ''.join(aList) 

2304 #@+node:ekr.20150514063305.182: *7* handle_keyword 

2305 def handle_keyword(self, aList, i): 

2306 if self.match_word(aList, i, "if"): 

2307 i += 2 

2308 elif self.match_word(aList, i, "elif"): 

2309 i += 4 

2310 elif self.match_word(aList, i, "while"): 

2311 i += 5 

2312 elif self.match_word(aList, i, "for"): 

2313 i += 3 

2314 else: assert False, 'not a keyword' 

2315 # Make sure one space follows the keyword. 

2316 k = i 

2317 i = self.skip_ws(aList, i) 

2318 if k == i: 

2319 c = aList[i] 

2320 aList[i : i + 1] = [' ', c] 

2321 i += 1 

2322 # Remove '(' and matching ')' and add a ':' 

2323 if aList[i] == "(": 

2324 # Look ahead. Don't remove if we span a line. 

2325 j = self.skip_to_matching_bracket(aList, i) 

2326 k = i 

2327 found = False 

2328 while k < j and not found: 

2329 found = aList[k] == '\n' 

2330 k += 1 

2331 if not found: 

2332 j = self.removeMatchingBrackets(aList, i) 

2333 if i < j < len(aList): 

2334 ch = aList[j] 

2335 aList[j : j + 1] = [ch, ":", " "] 

2336 j = j + 2 

2337 return j 

2338 return i 
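# Illustrative example (not in the original source): by the time this
# runs, '&&' has already become ' and ', so
#     if (a and b)
# is rewritten as
#     if a and b:
# The parentheses are kept when the condition spans more than one line.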

2339 #@+node:ekr.20150514063305.183: *6* mungeAllClasses 

2340 def mungeAllClasses(self, aList): 

2341 """Scan for a '{' at the top level that is preceeded by ')' """ 

2342 i = 0 

2343 while i < len(aList): 

2344 progress = i 

2345 if self.is_string_or_comment(aList, i): 

2346 i = self.skip_string_or_comment(aList, i) 

2347 elif self.match_word(aList, i, 'class'): 

2348 i1 = i 

2349 i = self.skip_line(aList, i) 

2350 aList[i - 1 : i] = list(f"{aList[i - 1]}:") 

2351 s = ''.join(aList[i1:i]) 

2352 k = s.find(' extends ') 

2353 if k > -1: 

2354 k1 = k 

2355 k = g.skip_id(s, k + 1) 

2356 k = g.skip_ws(s, k) 

2357 if k < len(s) and g.is_c_id(s[k]): 

2358 k2 = g.skip_id(s, k) 

2359 word = s[k:k2] 

2360 aList[i1:i] = list(f"{s[:k1]} ({word})") 

2361 elif self.match_word(aList, i, 'interface'): 

2362 aList[i : i + len('interface')] = list('class') 

2363 i = self.skip_line(aList, i) 

2364 aList[i - 1 : i] = list(f"{aList[i - 1]}: # interface") 

2365 i = self.skip_line(aList, i) # Essential. 

2366 else: 

2367 i += 1 

2368 assert i > progress 

2369 #@+node:ekr.20150514063305.184: *6* mungeAllFunctions & helpers 

2370 def mungeAllFunctions(self, aList): 

2371 """Scan for a '{' at the top level that is preceeded by ')' """ 

2372 prevSemi = 0 # Previous semicolon: header contains all previous text 

2373 i = 0 

2374 firstOpen = None 

2375 while i < len(aList): 

2376 progress = i 

2377 if self.is_string_or_comment(aList, i): 

2378 j = self.skip_string_or_comment(aList, i) 

2379 prevSemi = j 

2380 elif self.match(aList, i, '('): 

2381 if not firstOpen: 

2382 firstOpen = i 

2383 j = i + 1 

2384 elif self.match(aList, i, ';'): 

2385 j = i + 1 

2386 prevSemi = j 

2387 elif self.match(aList, i, "{"): 

2388 j = self.handlePossibleFunctionHeader( 

2389 aList, i, prevSemi, firstOpen) 

2390 prevSemi = j 

2391 firstOpen = None # restart the scan 

2392 else: 

2393 j = i + 1 

2394 # Handle unusual cases. 

2395 if j <= progress: 

2396 j = progress + 1 

2397 assert j > progress 

2398 i = j 

2399 #@+node:ekr.20150514063305.185: *7* handlePossibleFunctionHeader 

2400 def handlePossibleFunctionHeader(self, aList, i, prevSemi, firstOpen): 

2401 """ 

2402 converts function header lines from typescript format to python format. 

2403 That is, converts 

2404 x1..xn w::y (t1 z1, ..., tn zn) {                      (C++)

2405 (public|private|export) name (t1: z1, ..., tn: zn) {   (TypeScript)

2406 to 

2407 def y (z1,..zn): { # (public|private|export) 

2408 """ 

2409 assert self.match(aList, i, "{") 

2410 prevSemi = self.skip_ws_and_nl(aList, prevSemi) 

2411 close = self.prevNonWsOrNlChar(aList, i) 

2412 if close < 0 or aList[close] != ')': 

2413 # Should not increase *Python* indent. 

2414 return 1 + self.skip_to_matching_bracket(aList, i) 

2415 if not firstOpen: 

2416 return 1 + self.skip_to_matching_bracket(aList, i) 

2417 close2 = self.skip_to_matching_bracket(aList, firstOpen) 

2418 if close2 != close: 

2419 return 1 + self.skip_to_matching_bracket(aList, i) 

2420 open_paren = firstOpen 

2421 assert aList[open_paren] == '(' 

2422 head = aList[prevSemi:open_paren] 

2423 # Do nothing if the head starts with a keyword ("do", "if", "for", "while", etc.).

2424 k = self.skip_ws(head, 0) 

2425 if k >= len(head) or not head[k].isalpha(): 

2426 return 1 + self.skip_to_matching_bracket(aList, i) 

2427 kk = self.skip_past_word(head, k) 

2428 if kk > k: 

2429 headString = ''.join(head[k:kk]) 

2430 # C keywords that might be followed by '{' 

2431 # print "headString:", headString 

2432 if headString in ["do", "for", "if", "struct", "switch", "while"]: 

2433 return 1 + self.skip_to_matching_bracket(aList, i) 

2434 args = aList[open_paren : close + 1] 

2435 k = 1 + self.skip_to_matching_bracket(aList, i) 

2436 body = aList[close + 1 : k] 

2437 head = self.massageFunctionHead(head) 

2438 args = self.massageFunctionArgs(args) 

2439 body = self.massageFunctionBody(body) 

2440 result = [] 

2441 if head: 

2442 result.extend(head) 

2443 if args: 

2444 result.extend(args) 

2445 if body: 

2446 result.extend(body) 

2447 aList[prevSemi:k] = result 

2448 return prevSemi + len(result) 
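# Illustrative sketch (not in the original source), per the docstring
# above: a TypeScript member such as
#     public insertAfter(p: Position) { ... }
# ends up roughly as
#     def insertAfter(Position): ...
# That is, only the second token of each 'name: type' pair survives, and
# the braces are stripped later in convertCodeList.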

2449 #@+node:ekr.20150514063305.186: *7* massageFunctionArgs 

2450 def massageFunctionArgs(self, args): 

2451 assert args[0] == '(' 

2452 assert args[-1] == ')' 

2453 result = ['('] 

2454 lastWord = [] 

2455 if self.class_name: 

2456 for item in list("self,"): 

2457 result.append(item)  # may add an extra trailing comma

2458 i = 1 

2459 while i < len(args): 

2460 i = self.skip_ws_and_nl(args, i) 

2461 ch = args[i] 

2462 if ch.isalpha(): 

2463 j = self.skip_past_word(args, i) 

2464 lastWord = args[i:j] 

2465 i = j 

2466 elif ch == ',' or ch == ')': 

2467 for item in lastWord: 

2468 result.append(item) 

2469 if lastWord and ch == ',': 

2470 result.append(',') 

2471 lastWord = [] 

2472 i += 1 

2473 else: i += 1 

2474 if result[-1] == ',': 

2475 del result[-1] 

2476 result.append(')') 

2477 result.append(':') 

2478 return result 

2479 #@+node:ekr.20150514063305.187: *7* massageFunctionHead (sets .class_name) 

2480 def massageFunctionHead(self, head): 

2481 result: List[Any] = [] 

2482 prevWord = [] 

2483 self.class_name = '' 

2484 i = 0 

2485 while i < len(head): 

2486 i = self.skip_ws_and_nl(head, i) 

2487 if i < len(head) and head[i].isalpha(): 

2488 result = [] 

2489 j = self.skip_past_word(head, i) 

2490 prevWord = head[i:j] 

2491 i = j 

2492 # look for ::word2 

2493 i = self.skip_ws(head, i) 

2494 if self.match(head, i, "::"): 

2495 # Set the global to the class name. 

2496 self.class_name = ''.join(prevWord) 

2497 # print(class name:", self.class_name) 

2498 i = self.skip_ws(head, i + 2) 

2499 if i < len(head) and (head[i] == '~' or head[i].isalpha()): 

2500 j = self.skip_past_word(head, i) 

2501 if head[i:j] == prevWord: 

2502 result.extend('__init__') 

2503 elif head[i] == '~' and head[i + 1 : j] == prevWord: 

2504 result.extend('__del__') 

2505 else: 

2506 # result.extend(list('::')) 

2507 result.extend(head[i:j]) 

2508 i = j 

2509 else: 

2510 result.extend(prevWord) 

2511 else: i += 1 

2512 finalResult = list("def ") 

2513 finalResult.extend(result) 

2514 return finalResult 

2515 #@+node:ekr.20150514063305.188: *7* massageFunctionBody & helper 

2516 def massageFunctionBody(self, body): 

2517 # body = self.massageIvars(body) 

2518 # body = self.removeCasts(body) 

2519 # body = self.removeTypeNames(body) 

2520 body = self.dedentBlocks(body) 

2521 return body 

2522 #@+node:ekr.20150514063305.189: *8* dedentBlocks 

2523 def dedentBlocks(self, body): 

2524 """ 

2525 Look for '{' preceded by '{' or '}' or ';' 

2526 (with intervening whitespace and comments). 

2527 """ 

2528 i = 0 

2529 while i < len(body): 

2530 j = i 

2531 ch = body[i] 

2532 if self.is_string_or_comment(body, i): 

2533 j = self.skip_string_or_comment(body, i) 

2534 elif ch in '{};': 

2535 # Look ahead for '{'

2536 j += 1 

2537 while True: 

2538 k = j 

2539 j = self.skip_ws_and_nl(body, j) 

2540 if self.is_string_or_comment(body, j): 

2541 j = self.skip_string_or_comment(body, j) 

2542 if k == j: 

2543 break 

2544 assert k < j 

2545 if self.match(body, j, '{'): 

2546 k = j 

2547 j = self.skip_to_matching_bracket(body, j) 

2548 m = '# <Start dedented block>...' 

2549 body[k : k + 1] = list(m) 

2550 j += len(m) 

2551 while k < j: 

2552 progress = k 

2553 if body[k] == '\n': 

2554 k += 1 

2555 spaces = 0 

2556 while spaces < 4 and k < j: 

2557 if body[k] == ' ': 

2558 spaces += 1 

2559 k += 1 

2560 else: 

2561 break 

2562 if spaces > 0: 

2563 del body[k - spaces : k] 

2564 k -= spaces 

2565 j -= spaces 

2566 else: 

2567 k += 1 

2568 assert progress < k 

2569 m = ' # <End dedented block>' 

2570 body[j : j + 1] = list(m) 

2571 j += len(m) 

2572 else: 

2573 j = i + 1 

2574 # Defensive programming. 

2575 if i == j: 

2576 j += 1 

2577 assert i < j 

2578 i = j 

2579 return body 
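# Illustrative note (not in the original source): since Python has no
# block braces, a nested '{ ... }' block is bracketed with
#     # <Start dedented block>...   and   # <End dedented block>
# comments, and each of its lines loses up to four leading spaces so the
# result stays readable after the braces are removed.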

2580 #@-others 

2581 #@-others 

2582 c = self.c 

2583 TS_To_Python(c).go() 

2584 c.bodyWantsFocus() 

2585 #@+node:ekr.20160321042444.1: *3* ccc.import-jupyter-notebook 

2586 @cmd('import-jupyter-notebook') 

2587 def importJupyterNotebook(self, event): # pragma: no cover 

2588 """Prompt for a Jupyter (.ipynb) file and convert it to a Leo outline.""" 

2589 try: 

2590 import nbformat 

2591 assert nbformat 

2592 except ImportError: 

2593 g.es_print('import-jupyter-notebook requires nbformat package') 

2594 return 

2595 from leo.plugins.importers.ipynb import Import_IPYNB 

2596 # was @-others 

2597 c = self.c 

2598 x = Import_IPYNB(c) 

2599 fn = x.get_file_name() 

2600 if fn: 

2601 p = c.lastTopLevel() 

2602 root = p.insertAfter() 

2603 root.h = fn 

2604 x.import_file(fn, root) 

2605 c.redraw(root) 

2606 c.bodyWantsFocus() 

2607 #@+node:ekr.20160321072007.1: *3* ccc.export-jupyter-notebook 

2608 @cmd('export-jupyter-notebook') 

2609 def exportJupyterNotebook(self, event): # pragma: no cover 

2610 """Convert the present outline to a .ipynb file.""" 

2611 from leo.plugins.writers.ipynb import Export_IPYNB 

2612 c = self.c 

2613 Export_IPYNB(c).export_outline(c.p) 

2614 #@-others 

2615#@-others 

2616#@-leo