load

paddle.jit.load(path, **configs) [source]

Loads a model stored by paddle.jit.save or paddle.static.save_inference_model as a paddle.jit.TranslatedLayer, which can then be used for inference or fine-tune training.
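
A minimal usage sketch (assuming the prefix "example_model/linear" was produced earlier by paddle.jit.save, as in code example 1 below):

    import paddle

    # load the translated program and its parameters
    loaded_layer = paddle.jit.load("example_model/linear")

    # run inference with a dummy input of the saved input shape
    loaded_layer.eval()
    x = paddle.randn([1, 784], 'float32')
    pred = loaded_layer(x)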

Note

If the loaded model was stored by paddle.static.save_inference_model, there are some limitations when it is used for fine-tune training:

1. Imperative (dygraph) mode does not support LoDTensor, so models whose original input variables or parameters depend on LoD information cannot be used for now.
2. All feed variables of the stored model need to be passed into the forward method of the TranslatedLayer.
3. The stop_gradient information of the original model's variables has been lost and cannot be recovered accurately.
4. The trainable information of the original model's parameters has been lost and cannot be recovered accurately.

A workaround sketch for limitations 3 and 4 is shown after this note.
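
Because the stop_gradient and trainable flags are not recovered (limitations 3 and 4), one possible workaround before fine-tuning is to reset them explicitly on the loaded parameters. The sketch below is only an illustration; the path prefix "fc.example.model" follows code example 2 below.

    import paddle

    # load a model that was stored in the save_inference_model format
    # ("fc.example.model" matches code example 2 below)
    loaded = paddle.jit.load("fc.example.model")

    # limitations 3 & 4: stop_gradient / trainable flags are lost,
    # so mark every parameter as trainable again before fine-tuning
    for param in loaded.parameters():
        param.stop_gradient = False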

Parameters

  • path (str) - Path prefix of the model to load. The format is dirname/file_prefix or file_prefix.

  • **configs (dict, optional) - Other load configuration options kept for compatibility. These options may be removed in the future; it is not recommended to use them unless necessary. Default is None. The following options are currently supported: (1) model_filename (str) - file name of the inference model stored in the paddle 1.x save_inference_model format; the original default file name is __model__; (2) params_filename (str) - file name of the parameters stored in the paddle 1.x save_inference_model format; there is no default file name, and by default each parameter is stored as a separate file. A minimal usage sketch of these options is shown after this list.
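
As a minimal sketch of how these compatibility options could be passed (the path prefix "legacy_model_dir" and the combined parameter file name "__params__" are hypothetical; only model_filename's default __model__ comes from the description above):

    import paddle

    # hypothetical prefix of a paddle 1.x save_inference_model result that
    # stores the program in "__model__" and all parameters in one combined file
    loaded = paddle.jit.load(
        "legacy_model_dir",
        model_filename="__model__",     # prediction model file name
        params_filename="__params__")   # combined parameters file name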

Returns

TranslatedLayer, a Layer object that can execute the stored model.

Code Examples

  1. Load a model stored by paddle.jit.save, then run inference and fine-tune training.

    import numpy as np
    import paddle
    import paddle.nn as nn
    import paddle.optimizer as opt

    BATCH_SIZE = 16
    BATCH_NUM = 4
    EPOCH_NUM = 4

    IMAGE_SIZE = 784
    CLASS_NUM = 10

    # define a random dataset
    class RandomDataset(paddle.io.Dataset):
        def __init__(self, num_samples):
            self.num_samples = num_samples

        def __getitem__(self, idx):
            image = np.random.random([IMAGE_SIZE]).astype('float32')
            label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
            return image, label

        def __len__(self):
            return self.num_samples

    class LinearNet(nn.Layer):
        def __init__(self):
            super(LinearNet, self).__init__()
            self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)

        @paddle.jit.to_static
        def forward(self, x):
            return self._linear(x)

    def train(layer, loader, loss_fn, opt):
        for epoch_id in range(EPOCH_NUM):
            for batch_id, (image, label) in enumerate(loader()):
                out = layer(image)
                loss = loss_fn(out, label)
                loss.backward()
                opt.step()
                opt.clear_grad()
                print("Epoch {} batch {}: loss = {}".format(
                    epoch_id, batch_id, np.mean(loss.numpy())))

    # 1. train & save model.

    # create network
    layer = LinearNet()
    loss_fn = nn.CrossEntropyLoss()
    adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())

    # create data loader
    dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
    loader = paddle.io.DataLoader(dataset,
        batch_size=BATCH_SIZE,
        shuffle=True,
        drop_last=True,
        num_workers=2)

    # train
    train(layer, loader, loss_fn, adam)

    # save
    path = "example_model/linear"
    paddle.jit.save(layer, path)

    # 2. load model

    # load
    loaded_layer = paddle.jit.load(path)

    # inference
    loaded_layer.eval()
    x = paddle.randn([1, IMAGE_SIZE], 'float32')
    pred = loaded_layer(x)

    # fine-tune
    loaded_layer.train()
    adam = opt.Adam(learning_rate=0.001, parameters=loaded_layer.parameters())
    train(loaded_layer, loader, loss_fn, adam)
  2. (Compatibility) Load a model stored by paddle.fluid.io.save_inference_model, then run inference and fine-tune training.

    import numpy as np
    import paddle
    import paddle.static as static
    import paddle.nn as nn
    import paddle.optimizer as opt
    import paddle.nn.functional as F

    BATCH_SIZE = 16
    BATCH_NUM = 4
    EPOCH_NUM = 4

    IMAGE_SIZE = 784
    CLASS_NUM = 10

    # define a random dataset
    class RandomDataset(paddle.io.Dataset):
        def __init__(self, num_samples):
            self.num_samples = num_samples

        def __getitem__(self, idx):
            image = np.random.random([IMAGE_SIZE]).astype('float32')
            label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
            return image, label

        def __len__(self):
            return self.num_samples

    paddle.enable_static()

    image = static.data(name='image', shape=[None, 784], dtype='float32')
    label = static.data(name='label', shape=[None, 1], dtype='int64')
    pred = static.nn.fc(x=image, size=10, activation='softmax')
    loss = F.cross_entropy(input=pred, label=label)
    avg_loss = paddle.mean(loss)

    optimizer = paddle.optimizer.SGD(learning_rate=0.001)
    optimizer.minimize(avg_loss)

    place = paddle.CPUPlace()
    exe = static.Executor(place)
    exe.run(static.default_startup_program())

    # create data loader
    dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
    loader = paddle.io.DataLoader(dataset,
        feed_list=[image, label],
        places=place,
        batch_size=BATCH_SIZE,
        shuffle=True,
        drop_last=True,
        num_workers=2)

    # 1. train and save inference model
    for data in loader():
        exe.run(
            static.default_main_program(),
            feed=data,
            fetch_list=[avg_loss])

    model_path = "fc.example.model"
    paddle.fluid.io.save_inference_model(
        model_path, ["image"], [pred], exe)

    # 2. load model

    # enable dygraph mode
    paddle.disable_static(place)

    # load
    fc = paddle.jit.load(model_path)

    # inference
    fc.eval()
    x = paddle.randn([1, IMAGE_SIZE], 'float32')
    pred = fc(x)

    # fine-tune
    fc.train()
    loss_fn = nn.CrossEntropyLoss()
    adam = opt.Adam(learning_rate=0.001, parameters=fc.parameters())
    loader = paddle.io.DataLoader(dataset,
        places=place,
        batch_size=BATCH_SIZE,
        shuffle=True,
        drop_last=True,
        num_workers=2)
    for epoch_id in range(EPOCH_NUM):
        for batch_id, (image, label) in enumerate(loader()):
            out = fc(image)
            loss = loss_fn(out, label)
            loss.backward()
            adam.step()
            adam.clear_grad()
            print("Epoch {} batch {}: loss = {}".format(
                epoch_id, batch_id, np.mean(loss.numpy())))