|
16 | 16 | #include "../../includes/char_classification.h" |
17 | 17 | #include "../../memory-allocator/allocator.h" |
18 | 18 | #include "../../includes/env.h" |
| 19 | +#include <stdio.h> |
19 | 20 |
|
20 | 21 | int expand_variable(char **input, int index) |
21 | 22 | { |
@@ -85,23 +86,79 @@ void expand(t_token **head) |
85 | 86 | while (token) |
86 | 87 | { |
87 | 88 | if (token->type == UNQUOTED_WORD || token->type == DOUBLE_QUOTED_WORD) |
88 | | - expand_token(token, head, token_ptr, prev_ptr); |
| 89 | + expand_token(token, head, token_ptr, prev_ptr, &token); |
89 | 90 | prev_ptr = token_ptr; |
90 | 91 | token_ptr = &token->next; |
91 | 92 | token = token->next; |
92 | 93 | } |
93 | 94 | } |
94 | 95 |
|
95 | | -void internal_field_split(t_token **token_ptr) |
| 96 | +t_token *do_ifs(char *str) |
96 | 97 | { |
97 | | - char **new_words; |
| 98 | + //printf("do_ifs: %s\n", str); |
| 99 | + int len = ft_strlen(str); |
| 100 | + int i = 0; |
| 101 | + t_token *head = NULL; |
| 102 | + int last_was_word = 0; |
| 103 | + |
| 104 | + if (is_internal_field_sep(str[0])) { |
| 105 | + i++; |
| 106 | + if (i < len && is_whitespace(str[i]) && is_internal_field_sep(str[i])) { |
| 107 | + while (i < len && is_whitespace(str[i]) && is_internal_field_sep(str[i])) |
| 108 | + i++; |
| 109 | + if (is_internal_field_sep(str[i])) |
| 110 | + i++; |
| 111 | + } |
| 112 | + head = lexer_data_new((t_token){NULL, DELIMITER, NULL}); |
| 113 | + } |
| 114 | + |
| 115 | + while (i < len) { |
| 116 | + int start = i; |
| 117 | + int substrlen = 0; |
| 118 | + while (i < len && !is_internal_field_sep(str[i])) |
| 119 | + i++, substrlen++; |
| 120 | + |
| 121 | + if (last_was_word) |
| 122 | + lexer_data_append(&head, lexer_data_new((t_token){NULL, DELIMITER, NULL})); |
| 123 | + lexer_data_append(&head, lexer_data_new((t_token){ft_substr(str, start, substrlen), UNQUOTED_WORD, NULL})); |
| 124 | + last_was_word = 1; |
| 125 | + if (i < len && is_internal_field_sep(str[i])) { |
| 126 | + i++; |
| 127 | + while (i < len && is_whitespace(str[i])) |
| 128 | + i++; |
| 129 | + } |
| 130 | + } |
| 131 | + |
| 132 | + /* debug purposes - inspect tokens |
| 133 | +
|
| 134 | + t_token t = *head; |
| 135 | + while (t.next) { |
| 136 | + printf(t.type == UNQUOTED_WORD ? "'%s' ->" : "'DELIMITER' -> ", t.value); |
| 137 | + t = *t.next; |
| 138 | + } |
| 139 | + printf(t.type == UNQUOTED_WORD ? "'%s'\n" : "'DELIMITER'\n", t.value); |
| 140 | +*/ |
| 141 | + return head; |
| 142 | +} |
| 143 | + |
/*
** Perform field splitting on one expanded token, replacing it in the
** token list with the word/delimiter tokens produced by do_ifs().
**
** token_ptr:      address of the list slot currently pointing at the
**                 token to split (needed to unlink the original token).
** next_token_ptr: out-parameter; on a successful split it is set to the
**                 last newly inserted token so the caller's expansion
**                 cursor resumes after the new tokens.
**
** If do_ifs() produces no tokens (empty value) or a single token, the
** original token is left untouched and nothing is inserted.
**
** NOTE(review): the original token's value string is not freed here;
** confirm remove_token() releases both the node and its value,
** otherwise the old value leaks.
*/
void internal_field_split(t_token **token_ptr, t_token **next_token_ptr)
{
	t_token *new_words;
	t_token *token;

	token = *token_ptr;
	new_words = do_ifs(token->value);
	/* zero or one resulting token: splitting would be a no-op, keep token */
	if (new_words == NULL || new_words->next == NULL)
		return ;

	// move expander cursor to the last token
	*next_token_ptr = get_last_lexer_data(new_words);

	// put new expanded tokens
	lexer_data_insert(token, new_words);

	// remove legacy token
	t_token **prev = find_token_ptr_before(token_ptr, token);
	remove_token(prev, token_ptr, token);

}
0 commit comments