class XPUMxFp8LinearKernel(Mxfp8LinearKernel):
"""MXFP8 W8A8 GEMM on XPU."""
    @classmethod
    def is_supported(
        cls, compute_capability: int | None = None
    ) -> tuple[bool, str | None]:
        # compute_capability is a CUDA notion and is ignored on XPU.
        if not current_platform.is_xpu():
            return False, "XPUMxFp8LinearKernel is only supported on XPU"
        return True, None

    @classmethod
    def can_implement(cls, c: Mxfp8LinearLayerConfig) -> tuple[bool, str | None]:
        # No constraints beyond platform support are checked for this config.
        return True, None

    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
        # Reinterpret the packed scale bytes as e8m0 (power-of-two) block
        # scales, then transpose the weight and its scales into the layout
        # expected by torch.ops._xpu_C.fp8_gemm.
        weight_scale = layer.weight_scale.view(torch.float8_e8m0fnu)
        weight_scale = weight_scale.t().contiguous()
        replace_parameter(layer, "weight", layer.weight.t())
        replace_parameter(layer, "weight_scale", weight_scale.data)

    def apply_weights(
        self,
        layer: torch.nn.Module,
        x: torch.Tensor,
        bias: torch.Tensor | None = None,
    ) -> torch.Tensor:
        out_dtype = x.dtype
        # Dynamically quantize the activations to MXFP8, then run the W8A8
        # GEMM with per-block e8m0 scales on both operands.
        x_fp8, x_scale = quant_mxfp8(x)
        return torch.ops._xpu_C.fp8_gemm(
            x_fp8,
            layer.weight,
            out_dtype,
            x_scale,
            layer.weight_scale,
            bias,
        )
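

# A minimal reference sketch of the MXFP8 quantization that quant_mxfp8 is
# assumed to perform (per the OCP MX spec: 32-element blocks, e4m3 elements,
# e8m0 power-of-two scales). quant_mxfp8_reference, MXFP8_BLOCK_SIZE, and
# E4M3_MAX_EXP are hypothetical names used only for illustration; the real
# kernel is expected to fuse this on device.
import torch

MXFP8_BLOCK_SIZE = 32  # elements sharing one e8m0 scale
E4M3_MAX_EXP = 8  # exponent of the largest e4m3 binade (448 = 1.75 * 2**8)


def quant_mxfp8_reference(x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
    """Quantize the last dim of x in 32-element blocks to (e4m3, e8m0)."""
    blocks = x.float().reshape(*x.shape[:-1], -1, MXFP8_BLOCK_SIZE)
    amax = blocks.abs().amax(dim=-1, keepdim=True)
    # Shared power-of-two exponent chosen so the block maximum lands in the
    # e4m3 range; the clamp avoids log2(0) for all-zero blocks.
    shared_exp = torch.floor(torch.log2(amax.clamp(min=2.0**-126))) - E4M3_MAX_EXP
    shared_exp = shared_exp.clamp(-127, 127)
    x_fp8 = (blocks / 2.0**shared_exp).clamp(-448.0, 448.0).to(torch.float8_e4m3fn)
    # e8m0 scales are stored as biased-exponent bytes, one per block.
    scales = (shared_exp + 127).squeeze(-1).to(torch.uint8).view(torch.float8_e8m0fnu)
    return x_fp8.reshape_as(x), scales


# Assumed end-to-end flow: for x of shape (M, K) in bf16/fp16, x_fp8 is
# (M, K) e4m3 and x_scale is (M, K // 32) e8m0; with the transposed (K, N)
# weight prepared above, fp8_gemm returns an (M, N) tensor in out_dtype.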