path: root/custom_mutators/gramatron/preprocess/gnf_converter.py
import sys
import re
import copy
import json
from string import ascii_uppercase
from itertools import combinations
from collections import defaultdict

NONTERMINALSET = []
COUNT = 1

def main(grammar_file, out, start):
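    '''
    Drive the conversion pipeline: load a .g4 grammar (or a preprocessed
    .json dump), remove unit productions, split rules that mix terminals
    with non-terminals, break rules with more than two non-terminals, and
    finally rewrite the grammar into Greibach Normal Form. Intermediate
    results are dumped to debug_*.json files alongside the final output.
    '''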
    grammar = None
    # If grammar file is a preprocessed NT file, then skip preprocessing
    if grammar_file.endswith('.json'):
        with open(grammar_file, 'r') as fd:
            grammar = json.load(fd)
    elif grammar_file.endswith('.g4'):
        with open(grammar_file, 'r') as fd:
            data = fd.readlines()
        grammar = preprocess(data)
    else:
        raise ValueError('Unknown file format passed. Accepts (.g4/.json)')

    with open('debug_preprocess.json', 'w+') as fd:
        json.dump(grammar, fd)
    grammar = remove_unit(grammar) # eliminates unit productions
    with open('debug_unit.json', 'w+') as fd:
        json.dump(grammar, fd)
    grammar = remove_mixed(grammar) # split rules that mix terminals with non-terminals
    with open('debug_mixed.json', 'w+') as fd:
        json.dump(grammar, fd)
    grammar = break_rules(grammar) # eliminate rules with more than two non-terminals
    with open('debug_break.json', 'w+') as fd:
        json.dump(grammar, fd)
    grammar = gnf(grammar)

    # Dump GNF form of the grammar with only reachable rules 
    # reachable_grammar = get_reachable(grammar, start)
    # with open('debug_gnf_reachable.json', 'w+') as fd:
    #     json.dump(reachable_grammar, fd)
    with open('debug_gnf.json', 'w+') as fd:
        json.dump(grammar, fd)

    grammar["Start"] = [start]
    with open(out, 'w+') as fd:
        json.dump(grammar, fd)

def get_reachable(grammar, start):
    '''
    Returns a grammar without dead rules
    '''
    reachable_nt = set()
    worklist = list()
    processed = set()
    reachable_grammar = dict()
    worklist.append(start)

    while worklist:
        nt = worklist.pop(0)
        processed.add(nt)
        reachable_grammar[nt] = grammar[nt]
        rules = grammar[nt]
        for rule in rules:
            tokens = gettokens(rule)
            for token in tokens:
                if not isTerminal(token):
                    if token not in processed:
                        worklist.append(token)
    return reachable_grammar


def gettokens(rule):
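    '''
    Split a rule string into tokens: unquoted symbols (non-terminals) and
    single- or double-quoted strings (terminals), quotes included.
    '''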
    pattern = re.compile(r"([^\s\"\']+)|\"([^\"]*)\"|\'([^\']*)\'")
    return [matched.group(0) for matched in pattern.finditer(rule)]

def gnf(grammar):
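    '''
    Convert the grammar to Greibach Normal Form: repeatedly expand any rule
    whose first token is a non-terminal by substituting each of that
    non-terminal's productions, until every rule starts with a terminal.
    '''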
    old_grammar = copy.deepcopy(grammar)
    new_grammar = defaultdict(list)
    isgnf = False
    while not isgnf:
        for lhs, rules in old_grammar.items():
            for rule in rules:
                tokens = gettokens(rule) 
                if len(tokens) == 1 and isTerminal(rule):
                    new_grammar[lhs].append(rule)
                    continue
                startoken = tokens[0]
                endrule = tokens[1:]
                if not isTerminal(startoken):
                    newrules = []
                    extendrules = old_grammar[startoken]
                    for extension in extendrules:
                        temprule = endrule[:]
                        temprule.insert(0, extension)
                        newrules.append(temprule)
                    for newnew in newrules:
                        new_grammar[lhs].append(' '.join(newnew))
                else:
                    new_grammar[lhs].append(rule)
        isgnf = True
        for lhs, rules in new_grammar.items():
            for rule in rules:
                tokens = gettokens(rule)
                if len(tokens) == 1 and isTerminal(rule):
                    continue
                startoken = tokens[0]
                if not isTerminal(startoken):
                    isgnf = False
                    break
        if not isgnf:
            old_grammar = copy.deepcopy(new_grammar)
            new_grammar = defaultdict(list)
    return new_grammar
                

def preprocess(data):
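    '''
    Parse the raw .g4 lines into a dict mapping each non-terminal to its
    list of alternative right-hand sides. Productions are blank-line
    separated blocks: the first line is 'NAME: alternative' and each
    following line adds one further alternative, delimited by '|'.
    '''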
    productions = []
    production = []
    for line in data:
        if line != '\n': 
            production.append(line)
        else:
            productions.append(production)
            production = []
    final_rule_set = {}
    for production in productions:
        rules = []
        init = production[0]
        nonterminal = init.split(':')[0]
        rules.append(strip_chars(init.split(':')[1]).strip('| '))
        for production_rule in production[1:]:
            rules.append(strip_chars(production_rule.split('|')[0]))
        final_rule_set[nonterminal] = rules
    return final_rule_set

def remove_unit(grammar):
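    '''
    Eliminate unit productions: any rule whose right-hand side is a single
    non-terminal is replaced by that non-terminal's own productions,
    repeating until no unit productions remain.
    '''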
    nounitproductions = False 
    old_grammar = copy.deepcopy(grammar)
    new_grammar = defaultdict(list)
    while not nounitproductions:
        for lhs, rules in old_grammar.items():
            for rhs in rules:
                # Checking if the rule is a unit production rule
                if len(gettokens(rhs)) == 1:
                    if not isTerminal(rhs):
                        new_grammar[lhs].extend([rule for rule in old_grammar[rhs]])
                    else:
                        new_grammar[lhs].append(rhs)
                else:
                    new_grammar[lhs].append(rhs)
        # Checking there are no unit productions left in the grammar 
        nounitproductions = True
        for lhs, rules in new_grammar.items():
            for rhs in rules:
                if len(gettokens(rhs)) == 1:
                    if not isTerminal(rhs):
                        nounitproductions = False
                        break
            if not nounitproductions:
                break
        # Unit productions are still there in the grammar -- repeat the process
        if not nounitproductions:
            old_grammar = copy.deepcopy(new_grammar)
            new_grammar = defaultdict(list)
    return new_grammar

def isTerminal(rule):
    # Terminals are single-quoted strings, e.g. 'if'; match() checks that
    # the rule begins with one.
    pattern = re.compile(r"\'(.*?)\'")
    return bool(pattern.match(rule))

def remove_mixed(grammar):
    '''
    Remove rules where there are terminals mixed in with non-terminals
    '''
    new_grammar = defaultdict(list)
    for lhs, rules in grammar.items():
        for rhs in rules:
            regen_rule = []
            tokens = gettokens(rhs)
            if len(gettokens(rhs)) == 1:
                new_grammar[lhs].append(rhs)
                continue
            for token in tokens:
                # Identify if there is a terminal in the RHS
                if isTerminal(token):
                    # Check if a corresponding nonterminal already exists
                    nonterminal = terminal_exist(token, new_grammar)
                    if nonterminal:
                        regen_rule.append(nonterminal)
                    else:
                        new_nonterm = get_nonterminal()
                        new_grammar[new_nonterm].append(token)
                        regen_rule.append(new_nonterm)
                else:
                    regen_rule.append(token)
            new_grammar[lhs].append(' '.join(regen_rule))
    return new_grammar

def break_rules(grammar):
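    '''
    Break rules with more than two tokens on the right-hand side into a
    chain of rules with at most two symbols each, introducing (or reusing)
    non-terminals for the leading prefix, until no long rules remain.
    '''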
    new_grammar = defaultdict(list)
    old_grammar = copy.deepcopy(grammar)
    nomulti = False
    while not nomulti:
        for lhs, rules in old_grammar.items():
            for rhs in rules:
                tokens = gettokens(rhs)
                if len(tokens) > 2 and (not isTerminal(rhs)):
                    split = tokens[:-1] 
                    nonterminal = terminal_exist(' '.join(split), new_grammar)
                    if nonterminal:
                        newrule = ' '.join([nonterminal, tokens[-1]])
                        new_grammar[lhs].append(newrule)
                    else:
                        nonterminal = get_nonterminal()
                        new_grammar[nonterminal].append(' '.join(split))
                        newrule = ' '.join([nonterminal, tokens[-1]])
                        new_grammar[lhs].append(newrule)
                else:
                    new_grammar[lhs].append(rhs)
        nomulti = True
        for lhs, rules in new_grammar.items():
            for rhs in rules:
                tokens = gettokens(rhs)
                if len(tokens) > 2 and (not isTerminal(rhs)):
                    nomulti = False
                    break
        if not nomulti:
            old_grammar = copy.deepcopy(new_grammar)
            new_grammar = defaultdict(list)
    return new_grammar

def strip_chars(rule):
    return rule.strip('\n\t ')

def get_nonterminal():
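    '''Return a fresh non-terminal name, refilling the pool when it runs out.'''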
    global NONTERMINALSET
    if NONTERMINALSET:
        return NONTERMINALSET.pop(0)
    else:
        _repopulate()
        return NONTERMINALSET.pop(0)

def _repopulate():
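    '''Refill NONTERMINALSET with uppercase-letter combinations of length COUNT.'''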
    global COUNT
    global NONTERMINALSET
    NONTERMINALSET = [''.join(x) for x in list(combinations(ascii_uppercase, COUNT))]
    COUNT += 1

def terminal_exist(token, grammar):
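    '''
    Return the non-terminal whose rule list already contains exactly this
    token (so existing helper non-terminals can be reused), or None.
    '''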
    for nonterminal, rules in grammar.items():
        if token in rules:
            return nonterminal
    return None



if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Script to convert grammar to GNF form')
    parser.add_argument(
            '--gf',
            type=str,
            required=True,
            help='Location of grammar file')
    parser.add_argument(
            '--out',
            type=str,
            required=True,
            help='Location of output file')
    parser.add_argument(
            '--start',
            type=str,
            required=True,
            help='Start token')
    args = parser.parse_args()

    main(args.gf, args.out, args.start)
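
# Example invocation (file names and start symbol are illustrative):
#   python gnf_converter.py --gf grammar.g4 --out grammar_gnf.json --start PROGRAM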