A Step-by-Step Coding Tutorial on NVIDIA PhysicsNeMo: Darcy Flow, FNOs, PINNs, Surrogate Models, and Inference Benchmarking


# Section banner: data visualization.
# (Fixed: the scraped source used typographic quotes, which are a SyntaxError.)
print("\n" + "=" * 80)
print("SECTION 4: DATA VISUALIZATION")
print("=" * 80)

def visualize_darcy_samples(
    permeability: np.ndarray,
    pressure: np.ndarray,
    n_samples: int = 3
) -> None:
    """Plot permeability/pressure pairs and save them to 'darcy_samples.png'.

    Each of the first ``n_samples`` samples gets one row with the
    permeability field k(x,y) on the left and the pressure field u(x,y)
    on the right.

    Args:
        permeability: Array of shape (N, H, W) with permeability fields.
        pressure: Array of shape (N, H, W) with pressure fields.
        n_samples: Number of rows to plot; must satisfy n_samples <= N.
    """
    fig, axes = plt.subplots(n_samples, 2, figsize=(10, 4 * n_samples))
    # plt.subplots squeezes the axes array when n_samples == 1, which would
    # break the 2-D indexing below; normalize to a 2-D array of Axes.
    axes = np.atleast_2d(axes)

    for i in range(n_samples):
        im1 = axes[i, 0].imshow(permeability[i], cmap='viridis', origin='lower')
        axes[i, 0].set_title(f'Permeability Field (Sample {i+1})')
        axes[i, 0].set_xlabel('x')
        axes[i, 0].set_ylabel('y')
        plt.colorbar(im1, ax=axes[i, 0], label="k(x,y)")

        im2 = axes[i, 1].imshow(pressure[i], cmap='hot', origin='lower')
        axes[i, 1].set_title(f'Pressure Field (Sample {i+1})')
        axes[i, 1].set_xlabel('x')
        axes[i, 1].set_ylabel('y')
        plt.colorbar(im2, ax=axes[i, 1], label="u(x,y)")

    plt.tight_layout()
    plt.savefig('darcy_samples.png', dpi=150, bbox_inches="tight")
    plt.show()
    print("✓ Saved visualization to 'darcy_samples.png'")

# Visualize the first three training samples (perm_train / press_train are
# created earlier in the tutorial — confirm against the data-generation step).
visualize_darcy_samples(perm_train[:3], press_train[:3])

# Section banner: Fourier Neural Operator.
print("\n" + "=" * 80)
print("SECTION 5: FOURIER NEURAL OPERATOR (FNO)")
print("=" * 80)

"""
The Fourier Neural Operator (FNO) learns mappings between function spaces
by parameterizing the integral kernel in Fourier space.

Key insight: Convolution in physical space = multiplication in Fourier space

The FNO layer consists of:
1. FFT to transform to frequency domain
2. Multiplication with learnable weights (keeping only low-frequency modes)
3. Inverse FFT to transform back
4. Residual connection with a local linear transformation
"""

class SpectralConv2d(nn.Module):
    """
    2D Spectral Convolution Layer for FNO.

    Performs convolution in Fourier space by:
    1. Computing the real FFT of the input
    2. Multiplying the retained low-frequency modes with complex
       learnable weights
    3. Computing the inverse FFT back to physical space
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        modes1: int,
        modes2: int
    ):
        """
        Args:
            in_channels: Number of input channels.
            out_channels: Number of output channels.
            modes1: Number of Fourier modes kept along the first spatial axis.
            modes2: Number of Fourier modes kept along the second spatial axis.
        """
        super().__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.modes1 = modes1
        self.modes2 = modes2

        # Scale keeps the random complex weights small at initialization.
        self.scale = 1 / (in_channels * out_channels)

        # Two weight tensors: one for the positive-frequency block and one
        # for the negative-frequency block along the first axis.
        self.weights1 = nn.Parameter(
            self.scale * torch.rand(in_channels, out_channels, modes1, modes2, dtype=torch.cfloat)
        )
        self.weights2 = nn.Parameter(
            self.scale * torch.rand(in_channels, out_channels, modes1, modes2, dtype=torch.cfloat)
        )

    def compl_mul2d(self, input: torch.Tensor, weights: torch.Tensor) -> torch.Tensor:
        """Complex multiplication over channels for a batch of 2D tensors.

        (batch, in_ch, x, y) x (in_ch, out_ch, x, y) -> (batch, out_ch, x, y)
        """
        return torch.einsum("bixy,ioxy->boxy", input, weights)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the spectral convolution to x of shape (B, C_in, H, W)."""
        batch_size = x.shape[0]

        # rfft2 halves the last axis: frequency width is W // 2 + 1.
        x_ft = torch.fft.rfft2(x)

        out_ft = torch.zeros(
            batch_size, self.out_channels, x.size(-2), x.size(-1) // 2 + 1,
            dtype=torch.cfloat, device=x.device
        )

        # Low frequencies at the start of the first axis.
        out_ft[:, :, :self.modes1, :self.modes2] = \
            self.compl_mul2d(x_ft[:, :, :self.modes1, :self.modes2], self.weights1)

        # Low frequencies wrapped to the end of the first axis (negative freqs).
        out_ft[:, :, -self.modes1:, :self.modes2] = \
            self.compl_mul2d(x_ft[:, :, -self.modes1:, :self.modes2], self.weights2)

        # Back to physical space at the original spatial resolution.
        x = torch.fft.irfft2(out_ft, s=(x.size(-2), x.size(-1)))

        return x

class FNOBlock(nn.Module):
    """
    FNO Block combining spectral convolution with a local linear transform.

    output = σ(SpectralConv(x) + LocalLinear(x))
    """

    def __init__(
        self,
        channels: int,
        modes1: int,
        modes2: int,
        activation: str = "gelu"
    ):
        """
        Args:
            channels: Number of channels (input == output, residual-style).
            modes1: Fourier modes retained along the first spatial axis.
            modes2: Fourier modes retained along the second spatial axis.
            activation: 'gelu' for nn.GELU; anything else selects nn.ReLU.
        """
        super().__init__()

        self.spectral_conv = SpectralConv2d(channels, channels, modes1, modes2)
        # 1x1 convolution: the pointwise ("local linear") path.
        self.local_linear = nn.Conv2d(channels, channels, 1)

        self.activation = nn.GELU() if activation == 'gelu' else nn.ReLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Sum of spectral and pointwise paths, then nonlinearity."""
        return self.activation(self.spectral_conv(x) + self.local_linear(x))

class FourierNeuralOperator2D(nn.Module):
    """
    Complete 2D Fourier Neural Operator for learning operators.

    Architecture:
    1. Lift input (plus grid coordinates) to a higher-dimensional channel space
    2. Apply multiple FNO blocks (spectral convolutions + residuals)
    3. Project back to the output space

    This learns the mapping: k(x,y) -> u(x,y) for Darcy flow.
    """

    def __init__(
        self,
        in_channels: int = 1,
        out_channels: int = 1,
        modes1: int = 12,
        modes2: int = 12,
        width: int = 32,
        n_layers: int = 4,
        padding: int = 9
    ):
        """
        Args:
            in_channels: Channels of the input field.
            out_channels: Channels of the predicted field.
            modes1: Fourier modes retained along the first spatial axis.
            modes2: Fourier modes retained along the second spatial axis.
            width: Hidden channel width of the FNO blocks.
            n_layers: Number of stacked FNO blocks.
            padding: Spatial zero-padding applied before the FNO blocks
                (helps with non-periodic boundaries); removed afterwards.
        """
        super().__init__()

        self.modes1 = modes1
        self.modes2 = modes2
        self.width = width
        self.padding = padding

        # +2 for the (x, y) grid coordinates concatenated in forward().
        self.fc0 = nn.Linear(in_channels + 2, width)

        self.fno_blocks = nn.ModuleList([
            FNOBlock(width, modes1, modes2) for _ in range(n_layers)
        ])

        # Projection head back to the physical output channels.
        self.fc1 = nn.Linear(width, 128)
        self.fc2 = nn.Linear(128, out_channels)

    def get_grid(self, shape: Tuple, device: torch.device) -> torch.Tensor:
        """Create normalized [0, 1] grid coordinates of shape (B, H, W, 2)."""
        batch_size, size_x, size_y = shape[0], shape[2], shape[3]

        gridx = torch.linspace(0, 1, size_x, device=device)
        gridy = torch.linspace(0, 1, size_y, device=device)
        gridx, gridy = torch.meshgrid(gridx, gridy, indexing='ij')

        grid = torch.stack([gridx, gridy], dim=-1)
        grid = grid.unsqueeze(0).repeat(batch_size, 1, 1, 1)

        return grid

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map input fields (B, C_in, H, W) to output fields (B, C_out, H, W)."""
        batch_size = x.shape[0]

        grid = self.get_grid(x.shape, x.device)

        # Channels-last so the grid can be concatenated and fc0 applied.
        x = x.permute(0, 2, 3, 1)
        x = torch.cat([x, grid], dim=-1)

        x = self.fc0(x)
        x = x.permute(0, 3, 1, 2)

        # Pad spatially before the spectral layers (non-periodic domains).
        if self.padding > 0:
            x = F.pad(x, [0, self.padding, 0, self.padding])

        for block in self.fno_blocks:
            x = block(x)

        # Remove the padding added above.
        if self.padding > 0:
            x = x[..., :-self.padding, :-self.padding]

        x = x.permute(0, 2, 3, 1)
        x = F.gelu(self.fc1(x))
        x = self.fc2(x)
        x = x.permute(0, 3, 1, 2)

        return x

# Instantiate the FNO surrogate model and report its size.
# (`device` is defined earlier in the tutorial — confirm against setup section.)
print("\nCreating Fourier Neural Operator model...")
fno_model = FourierNeuralOperator2D(
    in_channels=1,
    out_channels=1,
    modes1=8,
    modes2=8,
    width=32,
    n_layers=4,
    padding=5
).to(device)

# Count only trainable parameters (requires_grad=True).
n_params = sum(p.numel() for p in fno_model.parameters() if p.requires_grad)
print(f"✓ FNO Model created with {n_params:,} trainable parameters")


