Overview
| Comment: | [core] debug mode |
|---|---|
| Downloads: | Tarball | ZIP archive | SQL archive |
| Timelines: | family | ancestors | descendants | both | core | rg |
| Files: | files | file ages | folders |
| SHA3-256: | cb932c349baf608f9007a69b430f9da6 |
| User & Date: | olr on 2018-06-09 09:46:58 |
| Other Links: | branch diff | manifest | tags |
Context
|
2018-06-11
| ||
| 09:11 | [core] gc engine: small code clarification check-in: 70e6105d8a user: olr tags: core, rg | |
|
2018-06-09
| ||
| 09:46 | [core] debug mode check-in: cb932c349b user: olr tags: core, rg | |
|
2018-06-08
| ||
| 17:56 | [build][core] backreferences for suggestions and messages check-in: 5e5ee6df40 user: olr tags: core, build, rg | |
Changes
Modified gc_core/py/lang_core/gc_engine.py from [28162284b3] to [0afa6afeb1].
| ︙ | |||
136 137 138 139 140 141 142 | 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 | - + + + |
dDA.clear()
try:
# regex parser
_, errs = _proofread(sText[iStart:iEnd], sRealText[iStart:iEnd], iStart, False, dDA, dPriority, sCountry, dOpt, bShowRuleId, bDebug, bContext)
aErrors.update(errs)
# token parser
oSentence = TokenSentence(sText[iStart:iEnd], sRealText[iStart:iEnd], iStart)
|
| ︙ | |||
640 641 642 643 644 645 646 | 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 | - + - - - + - - + + - + + - + + + - + + + |
lPointer = lNextPointer
# check arcs of first nodes
for dNode in self._getNextMatchingNodes(dToken, dGraph[0]):
lPointer.append({"iToken": dToken["i"], "dNode": dNode})
# check if there is rules to check for each pointer
for dPointer in lPointer:
if "<rules>" in dPointer["dNode"]:
|
| ︙ | |||
732 733 734 735 736 737 738 | 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 | - + |
p = PropertyValue()
p.Name = "FullCommentURL"
p.Value = sURL
xErr.aProperties = (p,)
else:
xErr.aProperties = ()
return xErr
|
| ︙ | |||
772 773 774 775 776 777 778 | 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 | - + - + |
if bContext:
dErr['sUnderlined'] = self.sSentence0[dErr["nStart"]:dErr["nEnd"]]
dErr['sBefore'] = self.sSentence0[max(0,dErr["nStart"]-80):dErr["nStart"]]
dErr['sAfter'] = self.sSentence0[dErr["nEnd"]:dErr["nEnd"]+80]
return dErr
def _expand (self, sMsg, nTokenOffset):
|
| ︙ | |||
818 819 820 821 822 823 824 | 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 | - + - |
# remove useless token
self.sSentence = self.sSentence[:self.nOffset+dToken["nStart"]] + " " * (dToken["nEnd"] - dToken["nStart"]) + self.sSentence[self.nOffset+dToken["nEnd"]:]
#print("removed:", dToken["sValue"])
else:
lNewToken.append(dToken)
if "sNewValue" in dToken:
# rewrite token and sentence
|
| ︙ |