program(1.3) [buildInfo = dict({{"coremlc-component-MIL", "3520.2.1"}, {"coremlc-version", "3520.2.1"}, {"coremltools-component-torch", "2.1.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.1"}, {"mldb_token", "mldb-27marq7y6j"}})] {
    func main(tensor audio_embedding, tensor vision_embedding) {
        bool var_4 = const()[name = string("op_4"), val = bool(true)];
        int32 var_6 = const()[name = string("op_6"), val = int32(-1)];
        bool input_1_interleave_0 = const()[name = string("input_1_interleave_0"), val = bool(false)];
        tensor input_1_cast_fp16 = concat(axis = var_6, interleave = input_1_interleave_0, values = (vision_embedding, audio_embedding))[name = string("input_1_cast_fp16")];
        tensor gate_proj_0_0_weight_to_fp16 = const()[name = string("gate_proj_0_0_weight_to_fp16"), val = tensor(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64)))];
        tensor gate_proj_0_0_bias_to_fp16 = const()[name = string("gate_proj_0_0_bias_to_fp16"), val = tensor(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(262272)))];
        tensor linear_0_cast_fp16 = linear(bias = gate_proj_0_0_bias_to_fp16, weight = gate_proj_0_0_weight_to_fp16, x = input_1_cast_fp16)[name = string("linear_0_cast_fp16")];
        string var_21_mode_0 = const()[name = string("op_21_mode_0"), val = string("EXACT")];
        tensor var_21_cast_fp16 = gelu(mode = var_21_mode_0, x = linear_0_cast_fp16)[name = string("op_21_cast_fp16")];
        tensor gate_proj_0_2_weight_to_fp16 = const()[name = string("gate_proj_0_2_weight_to_fp16"), val = tensor(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(262592)))];
        tensor linear_1_bias_0_to_fp16 = const()[name = string("linear_1_bias_0_to_fp16"), val = tensor(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(393728)))];
        tensor linear_1_cast_fp16 = linear(bias = linear_1_bias_0_to_fp16, weight = gate_proj_0_2_weight_to_fp16, x = var_21_cast_fp16)[name = string("linear_1_cast_fp16")];
        tensor gate_proj_1_scale_to_fp16 = const()[name = string("gate_proj_1_scale_to_fp16"), val = tensor(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(394816)))];
        tensor var_25_cast_fp16 = mul(x = linear_1_cast_fp16, y = gate_proj_1_scale_to_fp16)[name = string("op_25_cast_fp16")];
        tensor var_26_cast_fp16 = sigmoid(x = var_25_cast_fp16)[name = string("op_26_cast_fp16")];
        fp16 alpha_scale_to_fp16 = const()[name = string("alpha_scale_to_fp16"), val = fp16(0x1p-1)];
        tensor var_27_cast_fp16 = mul(x = var_26_cast_fp16, y = alpha_scale_to_fp16)[name = string("op_27_cast_fp16")];
        tensor var_28_cast_fp16 = mul(x = var_27_cast_fp16, y = audio_embedding)[name = string("op_28_cast_fp16")];
        tensor input0_1_cast_fp16 = add(x = vision_embedding, y = var_28_cast_fp16)[name = string("input0_1_cast_fp16")];
        tensor var_30 = const()[name = string("op_30"), val = tensor([-1])];
        tensor var_31_cast_fp16 = reduce_l2_norm(axes = var_30, keep_dims = var_4, x = input0_1_cast_fp16)[name = string("op_31_cast_fp16")];
        tensor fused_embedding = real_div(x = input0_1_cast_fp16, y = var_31_cast_fp16)[name = string("op_32_cast_fp16")];
    } -> (fused_embedding);
}