fix(neuralop): init uno model weights as torch
xiaoyewww committed Dec 25, 2024
1 parent a27e33d · commit 7918de1
Showing 1 changed file with 17 additions and 0 deletions.
neuralop/models/uno.py (17 additions, 0 deletions)
@@ -1,6 +1,8 @@
+import math
 import paddle
 import paddle.nn as nn
 import paddle.nn.functional as F
+import ppsci
 
 from ..layers.fno_block import FNOBlocks
 from ..layers.mlp import MLP
@@ -10,6 +12,19 @@
 from ..layers.spectral_convolution import SpectralConv
 
 
+def kaiming_init(layer):
+    if isinstance(layer, (nn.layer.conv._ConvNd, nn.Linear)):
+        print(f"layer: {layer} ")
+        init_kaimingUniform = paddle.nn.initializer.KaimingUniform(nonlinearity='leaky_relu', negative_slope=math.sqrt(5))
+        init_kaimingUniform(layer.weight)
+        if layer.bias is not None:
+            fan_in, _ = ppsci.utils.initializer._calculate_fan_in_and_fan_out(layer.weight)
+            if fan_in != 0:
+                bound = 1 / math.sqrt(fan_in)
+                init_uniform = paddle.nn.initializer.Uniform(low=-bound, high=bound)
+                init_uniform(layer.bias)
+
+
 class UNO(nn.Layer):
     """U-Shaped Neural Operator [1]_
@@ -279,6 +294,8 @@ def __init__(
             non_linearity=non_linearity,
         )
 
+        self.apply(kaiming_init)
+
     def forward(self, x, **kwargs):
         x = self.lifting(x)
 
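For context, the new kaiming_init hook reproduces the parameter initialization that PyTorch applies by default in reset_parameters() for nn.Linear and the conv layers: a Kaiming-uniform weight init with negative slope a = sqrt(5) (leaky_relu nonlinearity), plus a uniform bias init in [-1/sqrt(fan_in), 1/sqrt(fan_in)]. The sketch below is a rough, illustrative rendering of that torch-side behaviour being matched; torch is not used by this file, and the helper name is invented for comparison only.

    import math
    import torch.nn as nn

    def torch_style_reset(layer: nn.Linear) -> None:
        # Same recipe as nn.Linear.reset_parameters(): Kaiming-uniform weights with a=sqrt(5)
        nn.init.kaiming_uniform_(layer.weight, a=math.sqrt(5))
        if layer.bias is not None:
            # _calculate_fan_in_and_fan_out is a private torch.nn.init helper
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(layer.weight)
            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
            nn.init.uniform_(layer.bias, -bound, bound)

Because the Paddle version is registered via self.apply(kaiming_init), it runs recursively over every sublayer of the UNO, so the Conv and Linear layers inside the lifting/projection MLPs and FNO blocks pick up this torch-matching scheme instead of Paddle's built-in defaults.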
