@@ -32,8 +32,8 @@ def get_file_host_endian(reader: GGUFReader) -> tuple[str, str]:
 # please see the comments in the modify_gguf.py example.
 def dump_metadata(reader: GGUFReader, args: argparse.Namespace) -> None:
     host_endian, file_endian = get_file_host_endian(reader)
-    logger.info(f'* File is {file_endian} endian, script is running on a {host_endian} endian host.')
-    logger.info(f'* Dumping {len(reader.fields)} key/value pair(s)')
+    print(f'* File is {file_endian} endian, script is running on a {host_endian} endian host.') # noqa: NP100
+    print(f'* Dumping {len(reader.fields)} key/value pair(s)') # noqa: NP100
     for n, field in enumerate(reader.fields.values(), 1):
         if not field.types:
             pretty_type = 'N/A'
@@ -50,13 +50,13 @@ def dump_metadata(reader: GGUFReader, args: argparse.Namespace) -> None:
                 log_message += ' = {0}'.format(repr(str(bytes(field.parts[-1]), encoding='utf8')[:60]))
             elif field.types[0] in reader.gguf_scalar_to_np:
                 log_message += ' = {0}'.format(field.parts[-1][0])
-        logger.info(log_message)
+        print(log_message) # noqa: NP100
     if args.no_tensors:
         return
-    logger.info(f'* Dumping {len(reader.tensors)} tensor(s)')
+    print(f'* Dumping {len(reader.tensors)} tensor(s)') # noqa: NP100
     for n, tensor in enumerate(reader.tensors, 1):
         prettydims = ', '.join('{0:5}'.format(d) for d in list(tensor.shape) + [1] * (4 - len(tensor.shape)))
-        logger.info(f'  {n:5}: {tensor.n_elements:10} | {prettydims} | {tensor.tensor_type.name:7} | {tensor.name}')
+        print(f'  {n:5}: {tensor.n_elements:10} | {prettydims} | {tensor.tensor_type.name:7} | {tensor.name}') # noqa: NP100


 def dump_metadata_json(reader: GGUFReader, args: argparse.Namespace) -> None:
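For reference, a minimal standalone sketch of driving the same GGUFReader API that dump_metadata() iterates over. The field and tensor attribute names (fields, tensors, shape, tensor_type.name, name) are taken from the code in the diff; the import path and the command-line handling here are assumptions for illustration, not part of this commit.

# Minimal usage sketch, not part of this commit: the `from gguf import GGUFReader`
# import path and the CLI handling are assumed; only the reader attributes mirror
# the ones used by dump_metadata() above.
import sys

from gguf import GGUFReader


def summarize(path: str) -> None:
    reader = GGUFReader(path)
    # reader.fields holds the key/value metadata pairs, reader.tensors the tensor entries,
    # matching the two loops in dump_metadata().
    print(f'* {len(reader.fields)} key/value pair(s), {len(reader.tensors)} tensor(s)')
    for tensor in reader.tensors:
        dims = ' x '.join(str(int(d)) for d in tensor.shape)
        print(f'  {tensor.name}: {dims} | {tensor.tensor_type.name}')


if __name__ == '__main__':
    summarize(sys.argv[1])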