fix README details

fix multiline comment parse

add csv.py

parse csv with no clashes
This commit is contained in:
lyon 2023-03-11 23:58:13 +08:00
parent ec8405d304
commit 02de084767
8 changed files with 267 additions and 34 deletions

View File

@ -358,10 +358,10 @@ while True:
print(readBuff)
```
![Hnet-image (3)](document/image/132943365-0f7059b3-4f9d-4989-a5ec-2cce72b0cc96.gif)
</details>
![Hnet-image (3)](document/image/132943365-0f7059b3-4f9d-4989-a5ec-2cce72b0cc96.gif)
## Demo 03 ADC

View File

@ -400,10 +400,10 @@ while True:
```
![mmexport1631351523907](document/image/132944185-0a01b1ba-8cf7-4f9f-9d73-fe9cbcd52f0b.png)
</details>
![mmexport1631351523907](document/image/132944185-0a01b1ba-8cf7-4f9f-9d73-fe9cbcd52f0b.png)
## Demo 04 PWM output
<details><summary>查看代码</summary>

View File

@ -247,10 +247,10 @@ while True:
print(readBuff)
```
![Hnet-image (3)](document/image/132943365-0f7059b3-4f9d-4989-a5ec-2cce72b0cc96.gif)
</details>
![Hnet-image (3)](document/image/132943365-0f7059b3-4f9d-4989-a5ec-2cce72b0cc96.gif)
## Demo 03 ADC

View File

@ -289,10 +289,10 @@ while True:
```
![mmexport1631351523907](document/image/132944185-0a01b1ba-8cf7-4f9f-9d73-fe9cbcd52f0b.png)
</details>
![mmexport1631351523907](document/image/132944185-0a01b1ba-8cf7-4f9f-9d73-fe9cbcd52f0b.png)
## Demo 04 PWM output
<details><summary>查看代码</summary>

View File

@ -12,7 +12,7 @@
// "program": "${workspaceFolder}/build/boot/demo06-pikamain/pikascript_demo06-pikamain",
"args": [
// "--gtest_filter=pikaui.*"
"--gtest_filter=*.common_issue_1b23f4*"
// "--gtest_filter=*.csv*"
],
"stopAtEntry": false,
"cwd": "${workspaceFolder}",

View File

@ -0,0 +1,218 @@
# SPDX-FileCopyrightText: 2003 Python Software Foundation
# SPDX-FileCopyrightText: 2017 Scott Shawcroft, written for Adafruit Industries
# SPDX-FileCopyrightText: Copyright (c) 2021 Alec Delaney
#
# SPDX-License-Identifier: MIT
# SPDX-License-Identifier: PSF-2.0
# SPDX-License-Identifier: 0BSD
"""
`pikapython_csv`
================================================================================
PikaPython helper library for working with CSV files
* Author(s): Alec Delaney
* Author(s): Lyon
"""
import re
class reader:
    """Basic CSV reader class that behaves like CPython's ``csv.reader()``

    :param csvfile: The open file to read from (any iterator of line strings)
    :type csvfile: io.TextIOWrapper
    :param str delimiter: (Optional) The CSV delimiter, default is comma (,).
        A single-character delimiter is assumed.
    :param str quotechar: (Optional) The CSV quote character for encapsulating special characters
        including the delimiter, default is double quotation mark (")
    """

    def __init__(self, csvfile, delimiter=",", quotechar='"') -> None:
        # NOTE: attribute name kept as "file_interator" (sic) for backward
        # compatibility with any existing callers that touch it.
        self.file_interator = csvfile
        self.delimiter = delimiter
        self.quotechar = quotechar
        # Group 1: a quoted field followed by a delimiter.
        # Group 2: a bare run of non-delimiter characters.
        # Fix: the trailing separator after a quoted field is the configured
        # delimiter ({1}), not a hard-coded comma.
        self._re_exp = "(\\{0}.+?\\{0}){1}|([^{1}]+)".format(quotechar, delimiter)

    def __iter__(self):
        return self

    def __next__(self):
        """Parse the next line of the file and return it as a list of str values.

        A blank line yields an empty list, matching CPython's ``csv.reader``.

        :raises StopIteration: when the underlying file is exhausted
        """
        row_string = self.file_interator.__next__()
        # Strip a single trailing line terminator up front so the field
        # scanner below only ever sees field data.
        if row_string.endswith("\r\n"):
            row_string = row_string[:-2]
        elif row_string.endswith("\n"):
            row_string = row_string[:-1]
        csv_value_list = []
        while len(row_string) != 0:
            if row_string.startswith(self.delimiter):
                # Empty field: leading delimiter or two delimiters in a row
                csv_value_list.append("")
                row_string = row_string[1:]
                continue
            matches = re.match(self._re_exp, row_string).groups()
            if matches[0] is None:
                # Bare (unquoted) field
                latest_match = matches[1]
                consumed = len(latest_match)
                # Fix: a quoted field at the very end of the row has no
                # trailing delimiter, so it lands in this branch with its
                # quotes still attached — strip them here.
                if (
                    len(latest_match) >= 2
                    and latest_match.startswith(self.quotechar)
                    and latest_match.endswith(self.quotechar)
                ):
                    latest_match = latest_match[1:-1]
            else:
                # Quoted field: drop the surrounding quote characters
                latest_match = matches[0][1:-1]
                consumed = len(matches[0])
            # Un-double any escaped quote characters ("" -> ")
            csv_value_list.append(
                latest_match.replace(self.quotechar * 2, self.quotechar)
            )
            row_string = row_string[consumed:]
            if row_string.startswith(self.delimiter):
                row_string = row_string[1:]
                if not row_string:
                    # A trailing delimiter means a final empty field
                    csv_value_list.append("")
        return csv_value_list
class writer:
    """Basic CSV writer class that behaves like CPython's ``csv.writer()``

    :param csvfile: The open CSV file to write to
    :type csvfile: io.TextIOWrapper
    :param str delimiter: (Optional) The CSV delimiter, default is comma (,)
    :param str quotechar: (Optional) The CSV quote character for encapsulating special characters
        including the delimiter, default is double quotation mark (")
    """

    def __init__(self, csvfile, delimiter=",", quotechar='"'):
        # Fix: the parameter was previously misspelled "quoterchar", which
        # silently broke ``quotechar=`` keyword callers (including DictWriter,
        # which forwards **kwargs here and documents "quotechar").
        self.file_iterator = csvfile
        self.delimiter = delimiter
        self.quotechar = quotechar
        # Rows are terminated with CRLF (RFC 4180 convention)
        self.newlinechar = "\r\n"

    def writerow(self, seq):
        """Write a row to the CSV file

        :param seq: The list of values to write, which must all be str or be able to
            be cast to str
        :type seq: Sequence[Any]
        """
        str_seq = [str(entry) for entry in seq]
        # Escape embedded quote characters by doubling them
        doub_quote_seq = [
            entry.replace(self.quotechar, self.quotechar * 2) for entry in str_seq
        ]
        quoted_seq = [self._apply_quotes(entry) for entry in doub_quote_seq]
        parsed_str = (self.delimiter).join(quoted_seq)
        self.file_iterator.write(parsed_str + self.newlinechar)

    def writerows(self, rows):
        """Write multiple rows to the CSV file

        :param rows: An iterable item that yields multiple rows to write (e.g., list)
        :type rows: Iterable[Sequence[Any]]
        """
        for row in rows:
            self.writerow(row)

    def _apply_quotes(self, entry):
        """Wrap *entry* in quote characters if it contains the delimiter

        :param str entry: The entry to add the quote character to, if needed
        """
        return (self.quotechar + entry + self.quotechar) if self.delimiter in entry else entry
# Ported from CPython's csv.py:
class DictReader:
    """CSV reader that maps rows to a dict according to given or inferred fieldnames,
    it also accepts the delimiter and quotechar keywords

    :param f: The open file to read from
    :type f: io.TextIOWrapper
    :param fieldnames: (Optional) The fieldnames for each of the columns; when omitted,
        whatever is in the first row of the CSV file is used instead
    :type fieldnames: Sequence[str]
    :param str restkey: (Optional) A key name for values that have no key (row is longer
        than fieldnames), default is None
    :param restval: (Optional) A default value for keys that have no values (row is
        shorter than fieldnames), default is None
    :type restval: Any
    """

    def __init__(self, f, fieldnames=None, restkey=None, restval=None, **kwargs):
        self.fieldnames = fieldnames
        self.restkey = restkey
        self.restval = restval
        self.reader = reader(f, **kwargs)
        self.line_num = 0

    def __iter__(self):
        return self

    def __next__(self):
        """Return the next row of the file as a dict keyed by the fieldnames."""
        # Lazily consume the header row on the very first call when no
        # explicit fieldnames were supplied.
        if self.line_num == 0 and self.fieldnames is None:
            self.fieldnames = next(self.reader)
        row = next(self.reader)
        result = dict(zip(self.fieldnames, row))
        extra = len(row) - len(self.fieldnames)
        if extra > 0:
            # Row is longer than fieldnames: collect the overflow under restkey
            result[self.restkey] = row[len(self.fieldnames):]
        elif extra < 0:
            # Row is shorter than fieldnames: pad the missing keys with restval
            for missing_key in self.fieldnames[len(row):]:
                result[missing_key] = self.restval
        self.line_num += 1
        return result
# Ported from CPython's csv.py
class DictWriter:
    """CSV writer that uses a dict to write the rows according to fieldnames, it also
    accepts the delimiter and quotechar keywords

    :param f: The open file to write to
    :type f: io.TextIOWrapper
    :param fieldnames: The fieldnames for each of the columns
    :type fieldnames: Sequence[str]
    :param str restval: A default value for keys that have no values
    :param str extrasaction: The action to perform if a key is encountered when parsing
        the dict that is not included in the fieldnames parameter, either "raise" or
        "ignore". "raise" raises a ValueError, while "ignore" simply skips that
        key/value pair. Default behavior is "raise"
    """

    def __init__(self, f, fieldnames, restval="", extrasaction="raise", **kwargs):
        self.fieldnames = fieldnames  # list of keys for the dict
        self.restval = restval  # for writing short dicts
        if extrasaction.lower() not in ("raise", "ignore"):
            raise ValueError(
                "extrasaction (%s) must be 'raise' or 'ignore'" % extrasaction
            )
        self.extrasaction = extrasaction
        self.writer = writer(f, **kwargs)

    def writeheader(self):
        """Writes the header row to the CSV file"""
        header = dict(zip(self.fieldnames, self.fieldnames))
        self.writerow(header)

    def _dict_to_tuple(self, rowdict):
        # Reject keys outside of fieldnames only when extrasaction == "raise"
        if self.extrasaction == "raise":
            wrong_fields = [
                field for field in rowdict.keys() if field not in self.fieldnames
            ]
            if wrong_fields:
                raise ValueError(
                    "dict contains fields not in fieldnames: "
                    + ", ".join([repr(x) for x in wrong_fields])
                )
        # Missing keys fall back to restval; a generator keeps this lazy
        return (rowdict.get(key, self.restval) for key in self.fieldnames)

    def writerow(self, rowdict):
        """Writes a row to the CSV file

        :param rowdict: The row to write as a dict, with keys of the DictWriter's
            fieldnames parameter; values must be str or be able to be cast to str
        :type rowdict: Dict[str, Any]
        """
        return self.writer.writerow(self._dict_to_tuple(rowdict))

    def writerows(self, rowdicts):
        """Writes multiple rows to the CSV files

        :param rowdicts: An iterable item that yields multiple rows to write;
            values in those rows must be str or be able to be cast to str
        :type rowdicts: Iterable[Dict[str, Any]]
        """
        return self.writer.writerows(map(self._dict_to_tuple, rowdicts))

View File

@ -2554,6 +2554,7 @@ static char* _Parser_linesToBytesOrAsm(Args* outBuffs,
sLineOrigin = strsGetFirstToken(&buffs, sPyLines + uLinesOffset, '\n');
sLine = strsCopy(&buffs, sLineOrigin);
/* line connection */
if (bIsLineConnection) {
bIsLineConnection = 0;
@ -2572,30 +2573,6 @@ static char* _Parser_linesToBytesOrAsm(Args* outBuffs,
aLineConnection = arg_strAppend(aLineConnection, sLine);
goto next_line;
}
Cursor_forEach(c, sLine) {
Cursor_iterStart(&c);
Cursor_iterEnd(&c);
}
Cursor_deinit(&c);
/* auto connection */
if (uLinesIndex < uLinesNum) {
if (c.branket_deepth > 0) {
aLineConnection = arg_strAppend(aLineConnection, sLine);
bIsLineConnection = 1;
goto next_line;
}
}
/* branket match failed */
if (c.branket_deepth != 0) {
sSingleASM = NULL;
goto parse_after;
}
/* support Tab */
sLine = strsReplace(&buffs, sLine, "\t", " ");
/* remove \r */
sLine = strsReplace(&buffs, sLine, "\r", "");
/* filter for not end \n */
if (Parser_isVoidLine(sLine)) {
@ -2618,6 +2595,33 @@ static char* _Parser_linesToBytesOrAsm(Args* outBuffs,
goto next_line;
}
/* support Tab */
sLine = strsReplace(&buffs, sLine, "\t", " ");
/* remove \r */
sLine = strsReplace(&buffs, sLine, "\r", "");
/* check auto connection */
Cursor_forEach(c, sLine) {
Cursor_iterStart(&c);
Cursor_iterEnd(&c);
}
Cursor_deinit(&c);
/* auto connection */
if (uLinesIndex < uLinesNum) {
if (c.branket_deepth > 0) {
aLineConnection = arg_strAppend(aLineConnection, sLine);
bIsLineConnection = 1;
goto next_line;
}
}
/* branket match failed */
if (c.branket_deepth != 0) {
sSingleASM = NULL;
goto parse_after;
}
parse_line:
/* parse single Line to Asm */
sSingleASM = Parser_LineToAsm(&buffs, sLine, &tBlockStack);
@ -2704,8 +2708,12 @@ char* Parser_fileToAsm(Args* outBuffs, char* filename) {
/* add '\n' at the end */
lines = strsAppend(&buffs, lines, "\n\n");
char* res = Parser_linesToAsm(&buffs, lines);
arg_deinit(file_arg);
if (NULL == res) {
goto __exit;
}
res = strsCopy(outBuffs, res);
__exit:
arg_deinit(file_arg);
strsDeinit(&buffs);
return res;
}

View File

@ -5511,13 +5511,20 @@ TEST(parser, common_issue_1b23f4c1bf) {
TEST(parser, str_join) {
g_PikaMemInfo.heapUsedMax = 0;
Args* buffs = New_strBuff();
char* pikaAsm =
Parser_fileToAsm(buffs, "test/python/builtin/str_join.py");
char* pikaAsm = Parser_fileToAsm(buffs, "test/python/builtin/str_join.py");
__platform_printf("%s", pikaAsm);
args_deinit(buffs);
EXPECT_EQ(pikaMemNow(), 0);
}
/* Parse the bundled csv.py package and verify the parser leaks no memory.
 * Uses __platform_printf (not printf) for consistency with the other
 * parser tests in this file. */
TEST(parser, csv) {
    g_PikaMemInfo.heapUsedMax = 0;
    Args* buffs = New_strBuff();
    char* pikaAsm = Parser_fileToAsm(buffs, "package/pikascript/csv.py");
    __platform_printf("%s", pikaAsm);
    args_deinit(buffs);
    EXPECT_EQ(pikaMemNow(), 0);
}
#endif