# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utilities for models."""

import math

import torch


def init_method_normal(sigma):
    """Init method based on N(0, sigma)."""

    def init_(tensor):
        return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)

    return init_
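
# Usage sketch (illustrative, not part of the upstream file): the factory returns
# a closure that fills a tensor in place, e.g.
#
#   w = torch.empty(4, 4)
#   init_method_normal(0.02)(w)  # w now holds samples from N(0, 0.02)

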
def scaled_init_method_normal(sigma, num_layers):
    """Init method based on N(0, sigma/sqrt(2*num_layers))."""
    std = sigma / math.sqrt(2.0 * num_layers)

    def init_(tensor):
        return torch.nn.init.normal_(tensor, mean=0.0, std=std)

    return init_
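
# For example (illustrative values, not from this file):
#
#   init_ = scaled_init_method_normal(0.02, num_layers=24)
#   # std = 0.02 / sqrt(2 * 24) ≈ 0.0029
#   init_(some_projection_weight)  # hypothetical tensor to initialize

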
def attention_mask_func(attention_scores, attention_mask):
    """Fill masked positions of the attention scores with a large negative value."""
    attention_scores.masked_fill_(attention_mask, -10000.0)
    return attention_scores
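
# Minimal sketch of the expected call (assumed shapes, illustrative only):
#
#   scores = torch.zeros(4, 4)
#   future = torch.triu(torch.ones(4, 4), diagonal=1).bool()  # True above the diagonal
#   attention_mask_func(scores, future)  # masked positions now hold -10000.0

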
def get_linear_layer(rows, columns, init_method):
    """Simple linear layer with weight initialization."""
    layer = torch.nn.Linear(rows, columns)
    init_method(layer.weight)
    with torch.no_grad():
        layer.bias.zero_()
    return layer
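
# Example (illustrative sizes): a 768 -> 3072 projection with normally
# initialized weights and a zero bias:
#
#   layer = get_linear_layer(768, 3072, init_method_normal(0.02))

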
def fast_gelu(x):
    """MindSpore's fast gelu implementation."""
    return x / (1 + torch.exp(-1.702 * torch.abs(x))) * torch.exp(0.851 * (x - torch.abs(x)))
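
# Note (added for clarity): this expression is algebraically equal to
# x * torch.sigmoid(1.702 * x), the common sigmoid approximation of GELU;
# writing it with -abs(x) keeps both exponents non-positive and avoids overflow.

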
@torch.jit.script
def gelu_impl(x):
    """OpenAI's gelu implementation."""
    return (
        0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x * (1.0 + 0.044715 * x * x)))
    )


def openai_gelu(x):
    return gelu_impl(x)


# This is actually the Python equivalent of torch.nn.functional.gelu(), also with type hints for the ONNX exporter
@torch.jit.script
def erf_gelu(x):
    return (
        x
        * 0.5
        * (
            torch.erf(x / 1.41421).to(dtype=x.dtype)
            + torch.ones_like(x).to(dtype=x.dtype)
        )
    )
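

# Quick sanity-check sketch (illustrative, not part of the upstream file): all of
# the variants above closely approximate the exact GELU, e.g.
#
#   x = torch.linspace(-3.0, 3.0, steps=7)
#   torch.nn.functional.gelu(x)  # exact erf-based reference
#   openai_gelu(x)               # tanh approximation
#   fast_gelu(x)                 # sigmoid approximation
#   erf_gelu(x)                  # erf form, matches the reference closely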