@@ -53,6 +53,15 @@ def time_read_json_lines(self, index):
53
53
def time_read_json_lines_concat(self, index):
    """Time reading a JSON-lines file in 25000-row chunks and concatenating them."""
    chunks = read_json(self.fname, orient="records", lines=True, chunksize=25000)
    concat(chunks)
def time_read_json_lines_concat_one(self, index):
    """Time reading a JSON-lines file one row per chunk and concatenating them."""
    chunks = read_json(self.fname, orient="records", lines=True, chunksize=1)
    concat(chunks)
def time_read_json_lines_concat_hundred(self, index):
    """Time reading a JSON-lines file in 100-row chunks and concatenating them."""
    chunks = read_json(self.fname, orient="records", lines=True, chunksize=100)
    concat(chunks)
def time_read_json_lines_concat_ten_thousand(self, index):
    """Time reading a JSON-lines file in 10000-row chunks and concatenating them."""
    chunks = read_json(self.fname, orient="records", lines=True, chunksize=10000)
    concat(chunks)
def time_read_json_lines_read_one_chunk (self , index ):
57
66
iterator = read_json (self .fname , orient = "records" , lines = True , chunksize = 25000 )
58
67
for i , j in enumerate (iterator ):
@@ -83,6 +92,15 @@ def peakmem_read_json_lines(self, index):
83
92
def peakmem_read_json_lines_concat(self, index):
    """Peak memory of reading a JSON-lines file in 25000-row chunks and concatenating."""
    chunks = read_json(self.fname, orient="records", lines=True, chunksize=25000)
    concat(chunks)
def peakmem_read_json_lines_concat_one(self, index):
    """Peak memory of reading a JSON-lines file one row per chunk and concatenating."""
    chunks = read_json(self.fname, orient="records", lines=True, chunksize=1)
    concat(chunks)
def peakmem_read_json_lines_concat_hundred(self, index):
    """Peak memory of reading a JSON-lines file in 100-row chunks and concatenating."""
    chunks = read_json(self.fname, orient="records", lines=True, chunksize=100)
    concat(chunks)
def peakmem_read_json_lines_concat_ten_thousand(self, index):
    """Peak memory of reading a JSON-lines file in 10000-row chunks and concatenating."""
    chunks = read_json(self.fname, orient="records", lines=True, chunksize=10000)
    concat(chunks)
def peakmem_read_json_lines_one_chunk (self , index ):
87
105
iterator = read_json (self .fname , orient = "records" , lines = True , chunksize = 25000 )
88
106
for i , j in enumerate (iterator ):
0 commit comments