TensorFlow (SavedModel) -> ONNX

2023-11-02 07:59:40
SavedModel structure check:
python ~/miniconda3/lib/python3.11/site-packages/tensorflow/python/tools/saved_model_cli.py show --dir ./bert/ --list_ops --all
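If the TensorFlow console scripts are on your PATH (a standard pip or conda install registers a saved_model_cli entry point), the explicit module path is unnecessary and the same check is simply:

saved_model_cli show --dir ./bert/ --all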


Conversion command:
python -m tf2onnx.convert --saved-model save_model_path/ --output model.onnx --opset 15 --tag tag_from_saved_model --signature_def signature_def_from_saved_model
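The tag and signature names come from the structure check above. For a typical TF-Serving style export they are serve and serving_default, so a concrete invocation looks like:

python -m tf2onnx.convert --saved-model ./bert/ --output model.onnx --opset 15 --tag serve --signature_def serving_default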


Consistency check:
import numpy as np
import onnxruntime
import tensorflow as tf


# Use random integers here: casting np.random.random() (floats in [0, 1)) to
# int32 would silently produce all-zero inputs.
input_mask = np.random.randint(0, 2, (1, 100)).astype('int32')
input_type_ids = np.random.randint(0, 2, (1, 100)).astype('int32')
input_word_ids = np.random.randint(0, 1000, (1, 100)).astype('int32')


encoder_inputs = {
        'input_mask': tf.convert_to_tensor(input_mask),
        'input_type_ids': tf.convert_to_tensor(input_type_ids),
        'input_word_ids': tf.convert_to_tensor(input_word_ids)
        }
ort_sess = onnxruntime.InferenceSession("./model.onnx")
# Feed the ONNX inputs by position; this assumes the session's input order
# matches (input_mask, input_type_ids, input_word_ids).
ort_inputs = {
    ort_sess.get_inputs()[0].name: input_mask,
    ort_sess.get_inputs()[1].name: input_type_ids,
    ort_sess.get_inputs()[2].name: input_word_ids,
}


ort_outs = ort_sess.run(None, ort_inputs)
print("type(ort_outs)",type(ort_outs))
print("shape(ort_outs)",ort_outs[0].shape)

tf_model = tf.saved_model.load(export_dir="./bert", tags="serve")

tf_outs = tf_model(encoder_inputs)

print("type(tf_outs)",type(tf_outs))
print("shape(tf_outs)",tf_outs['pooled_output'].shape)
'''
type(ort_outs) <class 'list'>
shape(ort_outs) (1, 128)
type(tf_outs) <class 'dict'>
shape(tf_outs) (1, 128)

'''
np.testing.assert_allclose(tf_outs['pooled_output'], ort_outs[0], rtol=1e-05, atol=1e-05)
print("onnx model check finsh.")


Python script that parses a parameter string and branches to run the corresponding conversion commands:

import subprocess

#input_string = "convert_before:savemodel convert_after:onnx model_path:./bert output_path:./onnx/ opset:15"
input_string = "convert_before:savemodel convert_after:onnx model_path:./bert/ output_path:./onnx/ opset:16 tag:serve signature_def:serving_default concrete_function:1"


variables = {}
# Parse the space-separated key:value pairs into a dict.
key_value_pairs = input_string.split(" ")
for pair in key_value_pairs:
    key, value = pair.split(":", 1)  # maxsplit=1 keeps values that contain ':'
    variables[key.strip()] = value.strip()

convert_before = variables["convert_before"]
convert_after = variables["convert_after"]


if convert_before == "savemodel" and convert_after == "onnx":


    model_path = variables.get("model_path")
    output_path = variables.get("output_path") + "/model.onnx"
    opset = int(variables.get("opset", 15))
    tag = variables.get("tag", "serve")  # default tag is "serve"
    signature_def = variables.get("signature_def", "")
    concrete_function = variables.get("concrete_function", "")

    print("convert_before:", convert_before)
    print("convert_after:", convert_after)
    print("model_path:", model_path)
    print("output_path:", output_path)
    print("opset:", opset)
    print("tag:", tag)
    print("signature_def:", signature_def)
    print("concrete_function:", concrete_function)
    print("more information about this savemodel:")
    result = subprocess.run('pip show tensorflow | grep Location', shell=True, capture_output=True, text=True)
    if result.returncode != 0:
        print(result.stderr)
        raise SystemExit("could not locate the tensorflow package")
    # stdout looks like "Location: /path/to/site-packages"
    location, lvalue = result.stdout.split(" ", 1)
    print(lvalue)

    lvalue = lvalue.strip() + "/tensorflow/python/tools/saved_model_cli.py"
    print("lvalue", lvalue)
    executable = "python"
    showcommand = "show"
    argsd = "--dir"
    argsa = "--all"
    command = f"{executable} {lvalue} {showcommand} {argsd} {model_path} {argsa}"
    process = subprocess.Popen(command, shell=True)
    return_code = process.wait()


    executable = "python"
    tf2onnx = "-m tf2onnx.convert"
    arg1 = "--saved-model"
    arg2 = "--output"
    arg3 = "--opset"
    arg4 = "--tag"
    arg5 = "--signature_def"
    arg6 = "--concrete_function"

    command = f"{executable} {tf2onnx} {arg1} {model_path} {arg2} {output_path} {arg3} {opset} {arg4} {tag} "

    if(signature_def!=""):
        command += f"{arg5} {signature_def} "
    if(concrete_function!=""):
        command += f"{arg6} {concrete_function}"


    process = subprocess.Popen(command, shell=True)

    return_code = process.wait()
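
As a design note, building the command as an argument list and running it with subprocess.run(check=True) avoids shell quoting pitfalls and turns a failed conversion into an exception instead of a silently ignored non-zero return code. A minimal sketch reusing the variables parsed above:

import subprocess

cmd = ["python", "-m", "tf2onnx.convert",
       "--saved-model", model_path,
       "--output", output_path,
       "--opset", str(opset),
       "--tag", tag]
if signature_def:
    cmd += ["--signature_def", signature_def]
if concrete_function:
    cmd += ["--concrete_function", concrete_function]
subprocess.run(cmd, check=True)  # raises CalledProcessError on non-zero exit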

