Merge pull request #31 from THUDM/develop

Develop
Qinkai authored 2 years ago; committed by GitHub
commit afdee38502

@@ -70,7 +70,7 @@ def model_provider(pre_process=True, post_process=True):
         mp_rank = mpu.get_tensor_model_parallel_rank()
         if os.path.isdir(args.load_state):
             model_path = os.path.join(
-                args.load_state, f"model_mp_rank_{mp_rank}.pt"
+                args.load_state, "mp_rank_{:02d}_model_states.pt".format(mp_rank)
             )
         else:
             model_path = args.load_state

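Note: the new filename follows DeepSpeed's per-rank checkpoint layout, in which each tensor-model-parallel rank stores its weights as mp_rank_XX_model_states.pt inside the checkpoint directory. A minimal sketch of the path the updated pattern resolves to (the directory below is hypothetical; in the script it comes from --load-state):

import os

load_state = "/path/to/codegeex_13b_deepspeed"  # hypothetical --load-state directory
mp_rank = 0  # tensor-model-parallel rank of this process
model_path = os.path.join(load_state, "mp_rank_{:02d}_model_states.pt".format(mp_rank))
print(model_path)  # .../mp_rank_00_model_states.pt
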
@@ -0,0 +1 @@
from .codegeex_model import CodeGeeXModel

File diff suppressed because it is too large.

@@ -0,0 +1,326 @@
import copy
import json
import os
import time
from typing import *
import paddle
import paddle.nn.functional as F
from dataclasses import dataclass


def get_ltor_masks_and_position_ids(
    data,
    eod_token,
    reset_position_ids,
    reset_attention_mask,
):
    """Build masks and position id for left to right model."""
    # Extract batch size and sequence length.
    micro_batch_size, seq_length = data.shape

    # Attention mask (lower triangular).
    if reset_attention_mask:
        att_mask_batch = micro_batch_size
    else:
        att_mask_batch = 1
    attention_mask = paddle.tril(
        paddle.ones((att_mask_batch, seq_length, seq_length))
    ).reshape([att_mask_batch, 1, seq_length, seq_length])

    # Position ids.
    position_ids = paddle.arange(seq_length, dtype="int64")
    position_ids = position_ids.unsqueeze(0).expand_as(data)
    # We need to clone as the ids will be modified based on batch index.
    if reset_position_ids:
        position_ids = position_ids.clone()

    if reset_position_ids or reset_attention_mask:
        # Loop through the batches:
        for b in range(micro_batch_size):
            # Find indices where EOD token is.
            eod_index = position_ids[b, data[b] == eod_token]
            # Detach indices from positions if going to modify positions.
            if reset_position_ids:
                eod_index = eod_index.clone()
            # Loop through EOD indices:
            prev_index = 0
            for j in range(eod_index.shape[0]):
                i = eod_index[j]
                # Mask attention loss.
                if reset_attention_mask:
                    attention_mask[b, 0, (i + 1) :, : (i + 1)] = 0
                # Reset positions.
                if reset_position_ids:
                    position_ids[b, (i + 1) :] -= i + 1 - prev_index
                    prev_index = i + 1

    # Convert attention mask to binary:
    attention_mask = attention_mask < 0.5

    return attention_mask, position_ids


def get_batch(
    context_tokens,
    micro_batch_size,
    eod_token,
    reset_position_ids=False,
    reset_attention_mask=False,
):
    """Generate batch from context tokens."""
    tokens = context_tokens.reshape([micro_batch_size, -1]).cuda()

    # Get the attention mask and position ids.
    attention_mask, position_ids = get_ltor_masks_and_position_ids(
        tokens,
        eod_token,
        reset_position_ids,
        reset_attention_mask,
    )

    return tokens, attention_mask, position_ids


def top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float("Inf")):
    """This function has been mostly taken from huggingface conversational
    ai code at
    https://medium.com/huggingface/how-to-build-a-state-of-the-art-conversational-ai-with-transfer-learning-2d818ac26313"""

    if top_k > 0:
        # Remove all tokens with a probability less than the
        # last token of the top-k
        indices_to_remove = logits < paddle.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value

    if top_p > 0.0:
        # Convert to 1D.
        # paddle.sort returns only the sorted values; the indices come from paddle.argsort.
        sorted_logits = paddle.sort(logits, descending=True, axis=-1)
        sorted_indices = paddle.argsort(logits, descending=True, axis=-1)
        cumulative_probs = paddle.cumsum(F.softmax(sorted_logits, axis=-1), axis=-1)

        # Remove tokens with cumulative probability above the threshold
        sorted_indices_to_remove = cumulative_probs > top_p
        # Shift the indices to the right to keep also the first token
        # above the threshold
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0
        for i in range(sorted_indices.shape[0]):
            indices_to_remove = sorted_indices[i][sorted_indices_to_remove[i]]
            logits[i][indices_to_remove] = filter_value

    return logits


def pad_batch(batch, pad_id, seq_length):
    context_lengths = []
    for tokens in batch:
        context_length = len(tokens)
        if context_length < seq_length:
            tokens.extend([pad_id] * (seq_length - context_length))
        context_lengths.append(context_length)
    return batch, context_lengths


def forward_step(
    model,
    tokens,
    seq_length,
    position_ids,
    attention_mask,
    layer_past=None,
    get_key_value=None,
    prompt_length=None,
    context_length=None,
):
    # Forward pass through the model.
    output_tensor = model(
        tokens,
        position_ids,
        attention_mask,
        layer_past=layer_past,
        get_key_value=get_key_value,
        prompt_length=prompt_length,
        context_length=context_length,
    )

    if get_key_value:
        output_tensor, layer_past = output_tensor

    if get_key_value:
        return output_tensor, layer_past
    return output_tensor


def get_token_stream(
    model,
    tokenizer,
    seq_length,
    out_seq_length,
    context_tokens,
    return_scores: bool = False,
    prompt_length: int = None,
    micro_batch_size: int = None,
    bad_ids: List = None,
    temperature: float = 1.0,
    topp: float = 1.0,
    topk: int = 0,
    greedy: bool = False,
    recompute: bool = False,
):
    context_tokens, context_lengths = pad_batch(context_tokens, tokenizer.eos_token_id, seq_length)
    context_tokens_tensor = paddle.to_tensor(context_tokens, dtype="int64")
    context_length_tensor = paddle.to_tensor(context_lengths, dtype="int64")
    context_length = context_length_tensor.min().item()

    tokens, attention_mask, position_ids = get_batch(
        context_tokens_tensor,
        micro_batch_size,
        tokenizer.eos_token_id,
    )

    batch_token_iterator = sample_sequence_batch(
        model,
        tokenizer,
        context_tokens_tensor,
        context_length_tensor,
        attention_mask,
        position_ids,
        seq_length=seq_length,
        out_seq_length=out_seq_length,
        return_scores=return_scores,
        prompt_length=prompt_length,
        bad_ids=bad_ids,
        temperature=temperature,
        topp=topp,
        topk=topk,
        greedy=greedy,
        recompute=recompute,
    )

    for tokens, lengths in batch_token_iterator:
        context_length += 1
        if tokens is not None:
            yield tokens[:, :context_length], lengths
        else:
            yield None, None


def switch(val1, val2, boolean):
    boolean = boolean.cast(val1.dtype)
    return (1 - boolean) * val1 + boolean * val2


def sample_sequence_batch(
    model,
    tokenizer,
    context_tokens,
    context_lengths,
    attention_mask,
    position_ids,
    seq_length,
    out_seq_length,
    maxlen=None,
    return_scores: bool = False,
    prompt_length: int = None,
    bad_ids: List = None,
    temperature: float = 1.0,
    topp: float = 1.0,
    topk: int = 0,
    recompute: bool = False,
    greedy: bool = False,
):
    model.eval()
    with paddle.no_grad():
        context_length = context_lengths.min().item()
        eos_id = tokenizer.eos_token_id

        counter = 0
        org_context_length = context_length

        layer_past = None
        batch_size = context_tokens.shape[0]
        is_done = paddle.zeros([batch_size]).cast("uint8").cuda()
        tokens = context_tokens
        if maxlen is None:
            maxlen = seq_length - 1
            if maxlen > (org_context_length + out_seq_length):
                maxlen = org_context_length + out_seq_length

        lengths = paddle.ones([batch_size]).cast("int64").cuda() * maxlen
        if return_scores:
            scores = paddle.zeros([batch_size]).cast("float32").cuda()

        while context_length <= (maxlen):
            if recompute:
                logits = model(
                    tokens,
                    position_ids,
                    attention_mask,
                    prompt_length=prompt_length,
                    context_length=context_length,
                )
                logits = logits[:, context_length - 1, :]
            else:
                if counter == 0:
                    tokens2use = tokens[:, :context_length]
                    positions2use = position_ids[:, :context_length]
                else:
                    tokens2use = tokens[:, context_length - 1].reshape([batch_size, -1])
                    positions2use = position_ids[:, context_length - 1].reshape([batch_size, -1])
                logits, layer_past = model(
                    tokens2use,
                    positions2use,
                    attention_mask,
                    layer_past=layer_past,
                    get_key_value=True,
                    prompt_length=prompt_length,
                    context_length=context_length,
                )
                logits = logits[:, -1].reshape([batch_size, -1])

            if bad_ids is not None:
                for bad_id in bad_ids:
                    logits[:, bad_id] = -10000

            if greedy:
                prev = paddle.argmax(logits, axis=-1).reshape([-1])
            else:
                logits = logits.cast("float32")
                if return_scores:
                    orig_log_probs = F.log_softmax(logits, axis=-1)
                logits /= temperature
                logits = top_k_logits(logits, top_k=topk, top_p=topp)
                log_probs = F.softmax(logits, axis=-1)
                prev = paddle.multinomial(log_probs, num_samples=1).reshape([-1])

            started = context_lengths <= context_length

            new_tokens = switch(tokens[:, context_length].reshape([-1]), prev, started)

            if not greedy and return_scores:
                indices = prev.reshape([-1, 1])
                new_scores = orig_log_probs.gather(1, indices).reshape([-1])
                new_scores = new_scores * started
                new_scores = new_scores * is_done.cast("bool").logical_not()
                scores += new_scores

            tokens[:, context_length] = new_tokens
            done_token = (prev == eos_id).cast("uint8") & started.cast("uint8")
            just_finished = (done_token & ~is_done).cast("bool")
            lengths[just_finished.reshape([-1])] = context_length
            is_done = is_done | done_token
            done = paddle.all(is_done.cast("bool"))

            if return_scores:
                yield tokens, (lengths, scores)
            else:
                yield tokens, lengths

            context_length += 1
            counter += 1
            if done:
                break

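A quick sanity check of the mask helper above; this file appears to be codegeex/paddle/inference.py, given the import used by the test script later in this commit. A minimal sketch with the default flags, runnable on CPU PaddlePaddle:

import paddle
from codegeex.paddle.inference import get_ltor_masks_and_position_ids

data = paddle.to_tensor([[11, 12, 13, 14, 15]], dtype="int64")  # [micro_batch, seq_len]
mask, pos = get_ltor_masks_and_position_ids(
    data,
    eod_token=2,  # unused when both reset flags are False
    reset_position_ids=False,
    reset_attention_mask=False,
)
print(mask.shape)  # [1, 1, 5, 5]; True marks future positions that must be masked out
print(pos)         # [[0, 1, 2, 3, 4]]
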
@@ -0,0 +1,55 @@
import argparse

import paddle
import torch

linear_layer = [
    "mlp.dense_h_to_4h",
    "mlp.dense_4h_to_h",
    "attention.query",
    "attention.key",
    "attention.value",
    "attention.dense",
]


def WalkDict(x):
    for i in x:
        if isinstance(x[i], dict):
            WalkDict(x[i])
        elif isinstance(x[i], torch.Tensor):
            print(f"Converting '{i}' from 'torch.Tensor' to 'numpy.ndarray'.")
            npy = x[i].cpu().numpy()
            if any([f".{layer}.weight" in i for layer in linear_layer]):
                print(f"Transposing linear layer weight '{i}'.")
                x[i] = npy.T
            else:
                x[i] = npy


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pt",
        type=str,
        required=True,
        help="Path to pt checkpoint."
    )
    parser.add_argument(
        "--pdparams",
        type=str,
        required=True,
        help="Path to pdparams checkpoint."
    )
    opt = parser.parse_args()

    return opt


def main(opt):
    state_dict = torch.load(opt.pt)
    WalkDict(state_dict)
    paddle.save(state_dict, opt.pdparams)


if __name__ == "__main__":
    opt = parse_opt()
    main(opt)

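The transpose in WalkDict is needed because torch.nn.Linear stores its weight as [out_features, in_features], while paddle.nn.Linear expects [in_features, out_features]. A small sketch of the resulting shape change for one of the listed layers (the 5120 hidden size comes from the 13B config below; the key name is only illustrative):

import numpy as np

# e.g. "...mlp.dense_h_to_4h.weight": hidden -> 4 * hidden
w_torch_style = np.zeros((4 * 5120, 5120), dtype=np.float16)  # [out_features, in_features]
w_paddle_style = w_torch_style.T                              # [in_features, out_features]
print(w_torch_style.shape, "->", w_paddle_style.shape)        # (20480, 5120) -> (5120, 20480)
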
@@ -0,0 +1,16 @@
# CodeGeeX-13B paddle configuration
CHECKPOINT_PATH="<path where you put the checkpoint (e.g., XXX/codegeex_13b.pdparams)>"
MODEL_ARGS="--num-layers 39 \
--hidden-size 5120 \
--num-attention-heads 40 \
--max-position-embeddings 2048 \
--attention-softmax-in-fp32 \
--load "$CHECKPOINT_PATH" \
--layernorm-epsilon 1e-5 \
--fp16 \
--ws-encoding-start-id 10 \
--ws-encoding-length 10 \
--make-vocab-size-divisible-by 52224 \
--seq-length 2048"

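These flags are consumed by tests/test_inference_paddle.py later in this commit, which calls parse_known_args, so Megatron-style flags in MODEL_ARGS that the test script does not define (e.g. --fp16, --attention-softmax-in-fp32) are simply ignored. A minimal sketch of that behaviour; the loop below is a hypothetical stand-in for add_code_generation_args:

import argparse

parser = argparse.ArgumentParser()
for flag, typ, default in [
    ("--num-layers", int, 39),
    ("--hidden-size", int, 5120),
    ("--num-attention-heads", int, 40),
    ("--max-position-embeddings", int, 2048),
]:
    parser.add_argument(flag, type=typ, default=default)

args, unknown = parser.parse_known_args(
    "--num-layers 39 --hidden-size 5120 --num-attention-heads 40 "
    "--max-position-embeddings 2048 --fp16".split()
)
print(args.hidden_size)  # 5120
print(unknown)           # ['--fp16']  (unrecognized flags are returned, not treated as errors)
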
@@ -0,0 +1,121 @@
SCRIPT_PATH=$(realpath "$0")
SCRIPT_DIR=$(dirname "$SCRIPT_PATH")
MAIN_DIR=$(dirname "$SCRIPT_DIR")
# ====== Environment ======
# - NCCL & IB
export NCCL_DEBUG=info
export NCCL_IB_DISABLE=0
export NCCL_IB_GID_INDEX=3
HOSTFILE=/zhangpai24/workspace/zqk/hostfile
MASTER_IP=$(cat $HOSTFILE | head -n 1)
cat $HOSTFILE | awk '{print $1 " slots=8"}' > $SCRIPT_DIR/hostfile
echo "MASTER_IP=$MASTER_IP"
# ====== Parameters ======
DATA_PATH="<path with prefix where you put the data (e.g., XXX/data.13b.mmap/data)>"
CKPT_PATH="<path where you put the checkpoint (e.g., XXX/codegeex_13b.pt)>"
DS_CONFIG=ds_config.json
# - 13b
TP=1
PP=1
NLAYERS=39
HIDDEN=5120
NATTN_HEAD=40
EMBED_VOCAB=52224
GLOBAL_BATCH=560
MICRO_BATCH=10
NTRAIN_ITERS=100000
EVAL_INT=10
SAVE_INT=10
TRIAL_TAG="13b-test"
# - trial
TRIAL_NAME="pretrain-codegeex"
# - zero stage
ZERO_STAGE=2
# - logging & output
NOW=$(date +"%Y%m%d_%H%M%S")
OUTPUT_DIR=/zhangpai24/workspace/zqk/chkpts/$TRIAL_NAME-$TRIAL_TAG
TB_DIR=$OUTPUT_DIR/tb$NOW
mkdir -p $OUTPUT_DIR
mkdir -p $TB_DIR
# Deepspeed config
cat <<EOT > $DS_CONFIG
{
  "train_batch_size": $GLOBAL_BATCH,
  "train_micro_batch_size_per_gpu": $MICRO_BATCH,
  "steps_per_print": 5,
  "zero_optimization": {
    "stage": $ZERO_STAGE,
    "reduce_bucket_size": 50000000,
    "allgather_bucket_size": 50000000,
    "overlap_comm": true,
    "contiguous_gradients": false
  },
  "fp16": {
    "enabled": true,
    "loss_scale": 0,
    "loss_scale_window": 500,
    "hysteresis": 2,
    "min_loss_scale": 1,
    "initial_scale_power": 12
  },
  "wall_clock_breakdown": true
}
EOT
ds_args=""
ds_args=" --deepspeed ${ds_args}"
ds_args=" --no-pipeline-parallel ${ds_args}"
ds_args=" --deepspeed_config=$DS_CONFIG ${ds_args}"
ds_args=" --zero-stage=$ZERO_STAGE ${ds_args}"
ds_args=" --deepspeed-activation-checkpointing ${ds_args}"
echo "Launching deepspeed"
deepspeed \
--hostfile hostfile \
--master_addr $MASTER_IP \
$MAIN_DIR/codegeex/megatron/tools/pretrain_codegeex.py \
--tensor-model-parallel-size $TP \
--pipeline-model-parallel-size $PP \
--no-pipeline-parallel \
--num-layers $NLAYERS \
--hidden-size $HIDDEN \
--make-vocab-size-divisible-by $EMBED_VOCAB \
--num-attention-heads $NATTN_HEAD \
--seq-length 512 \
--loss-scale 12 \
--max-position-embeddings 2048 \
--micro-batch-size $MICRO_BATCH \
--global-batch-size $GLOBAL_BATCH \
--train-iters $NTRAIN_ITERS \
--lr 2e-4 \
--min-lr 1e-7 \
--lr-decay-iters 100000 \
--lr-decay-style cosine \
--lr-warmup-iters 1500 \
--log-interval 1 \
--eval-iters 10 \
--eval-interval $EVAL_INT \
--data-path $DATA_PATH \
--vocab-file $MAIN_DIR/codegeex/tokenizer/vocab.json \
--merge-file $MAIN_DIR/codegeex/tokenizer/merges.txt \
--save-interval $SAVE_INT \
--save $OUTPUT_DIR \
--load $OUTPUT_DIR \
--load-state $CKPT_PATH \
--split 98,2,0 \
--clip-grad 1.0 \
--weight-decay 0.1 \
--adam-beta1 0.9 \
--adam-beta2 0.95 \
--fp16 \
--ln-fp16 \
--attention-softmax-in-fp32 \
--checkpoint-activations \
--override-lr-scheduler \
--tensorboard-dir $TB_DIR \
$ds_args |& tee ${OUTPUT_DIR}/$NOW.log

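For reference, DeepSpeed requires train_batch_size = train_micro_batch_size_per_gpu × gradient_accumulation_steps × data-parallel world size. A minimal consistency check with the values above, assuming a hypothetical 56-GPU run (e.g. 7 hostfile nodes × 8 slots); with TP=1 and PP=1 every GPU is a data-parallel rank:

GLOBAL_BATCH = 560
MICRO_BATCH = 10
world_size = 56  # assumed number of GPUs / data-parallel ranks
grad_accum = GLOBAL_BATCH // (MICRO_BATCH * world_size)
assert GLOBAL_BATCH == MICRO_BATCH * grad_accum * world_size
print(grad_accum)  # 1 gradient-accumulation step under this assumption
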
@@ -0,0 +1,39 @@
# This script is used to test the inference of CodeGeeX.
GPU=$1
PROMPT_FILE=$2
SCRIPT_PATH=$(realpath "$0")
SCRIPT_DIR=$(dirname "$SCRIPT_PATH")
MAIN_DIR=$(dirname "$SCRIPT_DIR")
TOKENIZER_PATH="$MAIN_DIR/codegeex/tokenizer/"
# import model configuration
source "$MAIN_DIR/configs/codegeex_13b_paddle.sh"
# export CUDA settings
if [ -z "$GPU" ]; then
GPU=0
fi
export CUDA_HOME=/usr/local/cuda-11.1/
export CUDA_VISIBLE_DEVICES=$GPU
if [ -z "$PROMPT_FILE" ]; then
PROMPT_FILE=$MAIN_DIR/tests/test_prompt.txt
fi
# remove --greedy if using sampling
CMD="python $MAIN_DIR/tests/test_inference_paddle.py \
--prompt-file $PROMPT_FILE \
--tokenizer-path $TOKENIZER_PATH \
--micro-batch-size 1 \
--out-seq-length 1024 \
--temperature 0.8 \
--top-p 0.95 \
--top-k 0 \
--greedy \
$MODEL_ARGS"
echo "$CMD"
eval "$CMD"

@@ -0,0 +1,213 @@
import os
import copy
import time
import paddle
import random
import argparse
import numpy as np
from codegeex.paddle.inference import get_token_stream
from codegeex.paddle import CodeGeeXModel
from codegeex.tokenizer import CodeGeeXTokenizer


def model_provider(args):
    """Build the model."""
    old_dtype = paddle.get_default_dtype()
    paddle.set_default_dtype("float16")
    model = CodeGeeXModel(
        args.hidden_size,
        args.num_layers,
        args.num_attention_heads,
        args.padded_vocab_size,
        args.max_position_embeddings
    )
    model.language_model.embedding.word_embeddings.to(dtype="float32")
    model.language_model.embedding.position_embeddings.to(dtype="float32")
    model.language_model.topQueryEmbedding.top_query_embeddings.to(dtype="float32")
    for i in model.language_model.transformer.layers:
        i.input_layernorm.to(dtype="float32")
        i.post_attention_layernorm.to(dtype="float32")
    model.language_model.transformer.topQueryLayer.input_layernorm.to(dtype="float32")
    model.language_model.transformer.topQueryLayer.post_attention_layernorm.to(dtype="float32")
    model.language_model.transformer.final_layernorm.to(dtype="float32")
    paddle.set_default_dtype(old_dtype)

    return model


def add_code_generation_args(parser):
    group = parser.add_argument_group(title="code generation")
    group.add_argument(
        "--num-layers",
        type=int,
        default=39,
    )
    group.add_argument(
        "--hidden-size",
        type=int,
        default=5120,
    )
    group.add_argument(
        "--num-attention-heads",
        type=int,
        default=40,
    )
    group.add_argument(
        "--padded-vocab-size",
        type=int,
        default=52224,
    )
    group.add_argument(
        "--max-position-embeddings",
        type=int,
        default=2048,
    )
    group.add_argument(
        "--temperature",
        type=float,
        default=1.0,
        help="Sampling temperature.",
    )
    group.add_argument(
        "--greedy",
        action="store_true",
        default=False,
        help="Use greedy sampling.",
    )
    group.add_argument(
        "--top-p",
        type=float,
        default=0.0,
        help="Top p sampling.",
    )
    group.add_argument(
        "--top-k",
        type=int,
        default=0,
        help="Top k sampling.",
    )
    group.add_argument(
        "--out-seq-length",
        type=int,
        default=2048,
        help="Size of the output generated text.",
    )
    group.add_argument(
        "--prompt-file",
        type=str,
        default="./test_prompt.txt",
    )
    group.add_argument(
        "--tokenizer-path",
        type=str,
        default="./tokenizer",
    )
    group.add_argument(
        "--load",
        type=str,
    )
    group.add_argument(
        "--state-dict-path",
        type=str,
    )
    group.add_argument(
        "--micro-batch-size",
        type=int,
        default=1,
    )
    group.add_argument(
        "--quantize",
        action="store_true",
    )

    return parser


def main():
    parser = argparse.ArgumentParser()
    parser = add_code_generation_args(parser)
    args, _ = parser.parse_known_args()

    print("Loading tokenizer ...")
    tokenizer = CodeGeeXTokenizer(
        tokenizer_path=args.tokenizer_path,
        mode="codegeex-13b")

    print("Loading state dict ...")
    state_dict = paddle.load(args.load)
    state_dict = state_dict["module"]

    print("Building CodeGeeX model ...")
    model = model_provider(args)
    model.set_state_dict(state_dict)
    model.eval()
    model.to(dtype="float16")
    if args.quantize:
        raise NotImplementedError("quantize")

    with open(args.prompt_file, "r") as f:
        prompt = f.readlines()
        prompt = "".join(prompt)

    times = {}
    out_seq_lengths = [args.out_seq_length]
    micro_batch_size = args.micro_batch_size
    seq_length = args.max_position_embeddings
    for out_seq_length in out_seq_lengths:
        print(f"Generating with out_seq_len {out_seq_length}...")
        times[out_seq_length] = []
        for prompt in [prompt]:
            t0 = time.perf_counter()
            tokens = tokenizer.encode_code(prompt)
            print(tokens)
            print("Current prompt:")
            print(prompt)
            n_token_prompt = len(tokens)
            print("N_token_prompt:", n_token_prompt)
            token_stream = get_token_stream(
                model,
                tokenizer,
                seq_length,
                out_seq_length,
                [copy.deepcopy(tokens) for _ in range(micro_batch_size)],
                micro_batch_size=micro_batch_size,
                topk=args.top_k,
                topp=args.top_p,
                temperature=args.temperature,
                greedy=args.greedy,
            )
            is_finished = [False for _ in range(micro_batch_size)]
            for i, generated in enumerate(token_stream):
                generated_tokens = generated[0]
                for j in range(micro_batch_size):
                    if is_finished[j]:
                        continue
                    if generated_tokens[j].cpu().numpy()[-1] == tokenizer.eos_token_id or len(
                        generated_tokens[j]) >= out_seq_length:
                        is_finished[j] = True
                        generated_tokens_ = generated_tokens[j].cpu().numpy().tolist()
                        generated_code = tokenizer.decode_code(generated_tokens_[n_token_prompt:])
                        generated_code = "".join(generated_code)
                        t1 = time.perf_counter()
                        print("Total generation time:", t1 - t0, "# Tokens:", len(generated_tokens_) - n_token_prompt)
                        print(f"{(t1 - t0) / (len(generated_tokens_) - n_token_prompt)}s/token")
                        times[out_seq_length].append(t1 - t0)
                        print("================================= Generated code:")
                        print(generated_code)
                if all(is_finished):
                    break

    print(times)
    for out_seq_length in times.keys():
        print(out_seq_length, np.mean(times[out_seq_length]))

    print("Generation finished.")


if __name__ == "__main__":
    main()