
Walking through the llama.c source code (C)

1. build_transformer

void build_transformer(Transformer *t, char* checkpoint_path) {
    // read in the Config and the Weights from the checkpoint
    read_checkpoint(checkpoint_path, &t->config, &t->weights, &t->fd, &t->data, &t->file_size);
    // allocate the RunState buffers
    malloc_run_state(&t->state, &t->config);
}
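For reference, the Transformer struct that build_transformer fills in bundles exactly the pieces passed above; in llama.c it is declared roughly like this:

typedef struct {
    Config config;              // model hyperparameters read from the checkpoint header
    TransformerWeights weights; // pointers into the memory-mapped weight data
    RunState state;             // activation buffers used during the forward pass
    // bookkeeping needed to clean up the memory mapping later
    int fd;                     // file descriptor of the checkpoint file
    float* data;                // start of the mmap'd file contents
    ssize_t file_size;          // checkpoint file size in bytes
} Transformer;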

1.1 read_checkpoint

void read_checkpoint(char* checkpoint, Config* config, TransformerWeights* weights,
                     int* fd, float** data, ssize_t* file_size) {
    FILE *file = fopen(checkpoint, "rb");
    if (!file) { fprintf(stderr, "Couldn't open file %s\n", checkpoint); exit(EXIT_FAILURE); }
    // read in the config header. fread returns the number of complete Config structs read;
    // anything other than 1 (e.g. a short read at end of file) means the header is missing or truncated.
    if (fread(config, sizeof(Config), 1, file) != 1) { exit(EXIT_FAILURE); }
    // negative vocab size is a hacky way of signaling unshared weights. bit yikes.
    // vocab_size > 0 means the classifier shares the token embedding table (shared_weights = 1);
    // vocab_size <= 0 means it has its own classifier weights (shared_weights = 0).
    int shared_weights = config->vocab_size > 0 ? 1 : 0;
    config->vocab_size = abs(config->vocab_size); // undo the sign trick, e.g. -32000 -> 32000
    // figure out the file size
    fseek(file, 0, SEEK_END); // move file pointer to end of file
    *file_size = ftell(file); // get the file size, in bytes
    fclose(file);
    // memory map the Transformer weights into the data pointer.
    // mmap maps the whole file into the process's address space, so the weights can be read
    // like ordinary memory without explicit I/O; *data points at the start of the mapping.
    *fd = open(checkpoint, O_RDONLY); // open in read only mode
    if (*fd == -1) { fprintf(stderr, "open failed!\n"); exit(EXIT_FAILURE); }
    *data = mmap(NULL, *file_size, PROT_READ, MAP_PRIVATE, *fd, 0);
    if (*data == MAP_FAILED) { fprintf(stderr, "mmap failed!\n"); exit(EXIT_FAILURE); }
    // the weights start right after the Config header: advance the float pointer by
    // sizeof(Config)/sizeof(float) elements to skip it.
    float* weights_ptr = *data + sizeof(Config)/sizeof(float);
    memory_map_weights(weights, config, weights_ptr, shared_weights);
}
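The Config header that fread fills in at the top of read_checkpoint is a plain struct of seven ints (the gdb dump in the next section shows a concrete instance); in llama.c it is declared roughly as:

typedef struct {
    int dim;        // transformer dimension (embedding size)
    int hidden_dim; // hidden dimension of the FFN layers
    int n_layers;   // number of transformer layers
    int n_heads;    // number of query heads
    int n_kv_heads; // number of key/value heads (can be fewer than n_heads)
    int vocab_size; // vocabulary size; stored negative in the file to signal unshared classifier weights
    int seq_len;    // maximum sequence length
} Config;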

1.2 memory_map_weights

// (gdb) print *p
// $15 = {dim = 288, hidden_dim = 768, n_layers = 6, n_heads = 6, n_kv_heads = 6, vocab_size = 32000, seq_len = 256}

// The purpose of this function is to point every member of TransformerWeights at the correct
// region of the memory-mapped file, so that together they cover the full set of model weights.
/*
   +-------------------------+
   | token_embedding_table   |
   +-------------------------+
   | rms_att_weight          |
   +-------------------------+
   | wq (one slice per layer)|
   +-------------------------+
   | wk (one slice per layer)|
   +-------------------------+
   | wv (one slice per layer)|
   +-------------------------+
   | wo (one slice per layer)|
   +-------------------------+
   | rms_ffn_weight          |
   +-------------------------+
   | w1 (one slice per layer)|
   +-------------------------+
   | w2 (one slice per layer)|
   +-------------------------+
   | w3 (one slice per layer)|
   +-------------------------+
   | rms_final_weight        |
   +-------------------------+
   | (skipped RoPE freqs)    |
   +-------------------------+
   | wcls (possibly shared)  |
   +-------------------------+
*/
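The diagram corresponds one-to-one to the fields of TransformerWeights, each of which is just a float* into the mmap'd region; roughly, with the fields listed in the order they are mapped and their shapes in comments:

typedef struct {
    float* token_embedding_table; // (vocab_size, dim)
    float* rms_att_weight;        // (n_layers, dim)
    float* wq;                    // (n_layers, dim, n_heads * head_size)
    float* wk;                    // (n_layers, dim, n_kv_heads * head_size)
    float* wv;                    // (n_layers, dim, n_kv_heads * head_size)
    float* wo;                    // (n_layers, n_heads * head_size, dim)
    float* rms_ffn_weight;        // (n_layers, dim)
    float* w1;                    // (n_layers, hidden_dim, dim)
    float* w2;                    // (n_layers, dim, hidden_dim)
    float* w3;                    // (n_layers, hidden_dim, dim)
    float* rms_final_weight;      // (dim,)
    float* wcls;                  // classifier weights; aliases token_embedding_table when shared
} TransformerWeights;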

void memory_map_weights(TransformerWeights *w, Config* p, float* ptr, int shared_weights) {
    int head_size = p->dim / p->n_heads; // size of each attention head
    // make sure the multiplications below are done in 64bit to fit the parameter counts of 13B+ models
    unsigned long long n_layers = p->n_layers;
    w->token_embedding_table = ptr; // the first block of the weight data is the token embedding table
    ptr += p->vocab_size * p->dim;  // advance past it (vocab_size * dim floats)
    w->rms_att_weight = ptr;
    ptr += n_layers * p->dim;
    w->wq = ptr;
    ptr += n_layers * p->dim * (p->n_heads * head_size);
    w->wk = ptr;
    ptr += n_layers * p->dim * (p->n_kv_heads * head_size);
    w->wv = ptr;
    ptr += n_layers * p->dim * (p->n_kv_heads * head_size);
    w->wo = ptr;
    ptr += n_layers * (p->n_heads * head_size) * p->dim;
    w->rms_ffn_weight = ptr;
    ptr += n_layers * p->dim;
    w->w1 = ptr;
    ptr += n_layers * p->dim * p->hidden_dim;
    w->w2 = ptr;
    ptr += n_layers * p->hidden_dim * p->dim;
    w->w3 = ptr;
    ptr += n_layers * p->dim * p->hidden_dim;
    w->rms_final_weight = ptr;
    ptr += p->dim;
    ptr += p->seq_len * head_size / 2; // skip what used to be freq_cis_real (for RoPE)
    ptr += p->seq_len * head_size / 2; // skip what used to be freq_cis_imag (for RoPE)
    w->wcls = shared_weights ? w->token_embedding_table : ptr;
}
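Because every tensor is stored layer-major, the forward pass can select layer l's slice with plain pointer arithmetic on these base pointers. A minimal sketch of that pattern, assuming w, p and s point at the TransformerWeights, Config and RunState as above, and matmul is llama.c's helper computing xout = W * x:

    int head_size = p->dim / p->n_heads;
    unsigned long long l = 2; // example: pick layer 2
    // layer l's query matrix starts l strides into the flat wq block (dim == n_heads * head_size)
    float* wq_l = w->wq + l * p->dim * (p->n_heads * head_size);
    // q = Wq * xb for this layer; wk/wv/wo/w1/w2/w3 are indexed the same way with their own strides
    matmul(s->q, s->xb, wq_l, p->dim, p->dim);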

1.3 malloc_run_state

// Allocate the RunState buffers, the scratch space for activations while the model runs.

void malloc_run_state(RunState* s, Config* p) {
    // we calloc instead of malloc to keep valgrind happy
    int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads;
    s->x = calloc(p->dim, sizeof(float));
    s->xb = calloc(p->dim, sizeof(float));
    s->xb2 = calloc(p->dim, sizeof(float));
    s->hb = calloc(p->hidden_dim, sizeof(float));
    s->hb2 = calloc(p->hidden_dim, sizeof(float));
    s->q = calloc(p->dim, sizeof(float));
    s->key_cache = calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float));
    s->value_cache = calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float));
    s->att = calloc(p->n_heads * p->seq_len, sizeof(float));
    s->logits = calloc(p->vocab_size, sizeof(float));
    // ensure all mallocs went fine
    if (!s->x || !s->xb || !s->xb2 || !s->hb || !s->hb2 || !s->q
     || !s->key_cache || !s->value_cache || !s->att || !s->logits) {
        fprintf(stderr, "malloc failed!\n");
        exit(EXIT_FAILURE);
    }
}
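Every calloc above needs a matching free when the transformer is torn down; llama.c's free_run_state mirrors the allocations, roughly:

void free_run_state(RunState* s) {
    free(s->x);
    free(s->xb);
    free(s->xb2);
    free(s->hb);
    free(s->hb2);
    free(s->q);
    free(s->att);
    free(s->logits);
    free(s->key_cache);
    free(s->value_cache);
}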

2. generate(&transformer, &tokenizer, &sampler, prompt, steps);

2.1 encode

encode(tokenizer, prompt, 1, 0, prompt_tokens, &num_prompt_tokens);

(gdb) print *t

$1 = {vocab = 0x7fe657a28010, vocab_scores = 0x560eb70a5b00, sorted_vocab = 0x0, vocab_size = 32000, max_token_length = 27,

byte_pieces = "\000\000\001\000\002\000\003\000\004\000\005\000\006\000\a\000\b\000\t\000\n\000\v\000\f\000\r\000\016\000\017\000\020\000\021\000\022\000\023\000\024\000\025\000\026\000\027\000\030\000\031\000\032\000\033\000\034\000\035\000\036\000\037\000 \000!\000\"\000#\000$\000%\000&\000'\000(\000)\000*\000+\000,\000-\000.\000/\000\060\000\061\000\062\000\063\000\064\000\065\000\066\000\067\000\070\000\071\000:\000;\000<\000=\000>\000?\000@\000A\000B\000C\000D\000E\000F\000G\000H\000I\000J\000K\000L\000M\000N\000O\000P\000Q\000R\000S\000T\000U\000V\000W\000X\000Y\000Z\000[\000\\\000]\000^\000_\000`\000a\000b\000c\000"...}

vocab is a pointer to the array of vocabulary strings; the dump above shows the tokenizer before the vocab has been sorted (sorted_vocab is still 0x0).

(gdb) p *text
$2 = 104 'h'

(gdb) print bos
$14 = 1 '\001'

(gdb) print eos
$15 = 0 '\000'

(gdb) p *tokens
$5 = 1537102592

(gdb) p *n_tokens
$6 = 0

(At this point tokens has only been allocated, not written, so *tokens is still uninitialized garbage; *n_tokens starts at 0.)

void encode(Tokenizer* t, char *text, int8_t bos, int8_t eos, int *tokens, int *n_tokens) {
    // encode the string text (input) into an upper-bound preallocated tokens[] array
    // bos != 0 means prepend the BOS token (=1), eos != 0 means append the EOS token (=2)
    if (text == NULL) { fprintf(stderr, "cannot encode NULL text\n"); exit(EXIT_FAILURE); }

    // if the sorted vocabulary has not been built yet, lazily allocate t->sorted_vocab,
    // copy every (string, id) pair out of the unsorted t->vocab, and sort it
    if (t->sorted_vocab == NULL) {
        // lazily malloc and sort the vocabulary
        t->sorted_vocab = malloc(t->vocab_size * sizeof(TokenIndex));
        for (int i = 0; i < t->vocab_size; i++) {
            t->sorted_vocab[i].str = t->vocab[i];
            t->sorted_vocab[i].id = i;
        }
        qsort(t->sorted_vocab, t->vocab_size, sizeof(TokenIndex), compare_tokens);
    }

Explanation. The unsorted vocab, indexed by token id:

(gdb) p t->vocab[0]
$5 = 0x561b5bfbbf20 ""
(gdb) p t->vocab[1]
$6 = 0x561b5bfbbf40 "\n\n"
(gdb) p t->vocab[2]
$7 = 0x561b5bfbbf60 "\n\n"
(gdb) p t->vocab[100]
$8 = 0x561b5bfbcba0 "<0x61>"
(gdb) p t->vocab[1100]
$9 = 0x561b5bfc48a0 "son"
(gdb) p t->vocab[1101]
$10 = 0x561b5bfc48c0 " follow"

After sorting, each entry pairs a string (and its address) with its original id, ordered by the string:

(gdb) print t->sorted_vocab[0]
$3 = {str = 0x5564f5456f60 "\n\n", id = 2}
(gdb) print t->sorted_vocab[1000]
$4 = {str = 0x5564f54a5880 " Chicago", id = 10059}
(gdb) print t->sorted_vocab[1001]
$5 = {str = 0x5564f54c8960 " Chief", id = 14546}
(gdb) print t->sorted_vocab[1002]
$6 = {str = 0x5564f5526660 " Chiesa", id = 26553}
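The entries printed above are TokenIndex pairs, and both the qsort call and the later lookup use a plain strcmp comparator; in llama.c these are declared roughly as:

typedef struct {
    char *str; // pointer to the token's string in the tokenizer
    int id;    // the token's original index in t->vocab
} TokenIndex;

// comparator shared by qsort (sorting the vocab) and bsearch (looking tokens up)
int compare_tokens(const void *a, const void *b) {
    return strcmp(((TokenIndex*)a)->str, ((TokenIndex*)b)->str);
}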

    // create a temporary buffer that will store merge candidates of always two consecutive tokens
    // *2 for concat, +1 for null terminator +2 for UTF8 (in case max_token_length is 1)
    char* str_buffer = malloc((t->max_token_length*2 +1 +2) * sizeof(char));
    size_t str_len = 0;

Explanation of the buffer size t->max_token_length * 2 + 1 + 2:

t->max_token_length * 2: when two tokens are concatenated as a merge candidate, the result can be at most the sum of their lengths, i.e. twice the maximum token length.
+1: one extra byte for the terminating '\0' that marks the end of the string.
+2: headroom for UTF-8, where a single character can span several bytes (up to 4); if max_token_length were 1, these two extra bytes keep a multi-byte UTF-8 character from overrunning the buffer.

For the tokenizer dumped above (max_token_length = 27) this allocates 27*2 + 1 + 2 = 57 bytes.

    // start at 0 tokens
    *n_tokens = 0;

    // add optional BOS (=1) token, if desired
    if (bos) tokens[(*n_tokens)++] = 1;

    // add_dummy_prefix is true by default
    // so prepend a dummy prefix token to the input string, but only if text != ""
    // TODO: pretty sure this isn't correct in the general case but I don't have the
    // energy to read more of the sentencepiece code to figure out what it's doing
    if (text[0] != '\0') {
        int dummy_prefix = str_lookup(" ", t->sorted_vocab, t->vocab_size);
        tokens[(*n_tokens)++] = dummy_prefix;
    }

Explanation: before the input text itself is encoded, the token array is initialized and seeded.

(gdb) print *tokens
$7 = 1815610112

(gdb) p text[0]
$10 = 104 'h'

(gdb) p bos
$9 = 1 '\001'

If a beginning-of-sentence marker (BOS) is requested, the BOS token id (1 here) is written into tokens and *n_tokens is incremented.

int dummy_prefix = str_lookup(" ", t->sorted_vocab, t->vocab_size); returns the index (token id) of the single-space string in the vocabulary.

Syntax note: the expression tokens[(*n_tokens)++] = some_value; does two things in C:

Array assignment: the value is stored at the index given by the integer that n_tokens points to.
Post-increment: (*n_tokens)++ uses the current value of *n_tokens as the index first, and only afterwards increments *n_tokens by one.

Together, this idiom appends one element to the tokens array on each use, with the index advancing automatically; here it is used to append token ids to the encoded token list one by one.

    // Okay UTF-8 time. This will get messy. Here is the reference from Wikipedia:
    // Code point <-> UTF-8 conversion
    // First code point  Last code point  Byte 1    Byte 2    Byte 3    Byte 4
    // U+0000            U+007F           0xxxxxxx
    // U+0080            U+07FF           110xxxxx  10xxxxxx
    // U+0800            U+FFFF           1110xxxx  10xxxxxx  10xxxxxx
    // U+10000           U+10FFFF         11110xxx  10xxxxxx  10xxxxxx  10xxxxxx

    // process the raw (UTF-8) byte sequence of the input string
    for (char *c = text; *c != '\0'; c++) {

        // reset buffer if the current byte is ASCII or a leading byte
        // 0xC0 is 11000000, so (*c & 0xC0) keeps the first 2 bits and zeros the rest
        // 0x80 is 10000000
        // in UTF-8, all continuation bytes start with "10" in first two bits
        // so in English this is: "if this byte is not a continuation byte"
        if ((*c & 0xC0) != 0x80) {
            // this byte must be either a leading byte (11...) or an ASCII char (0x...)
            // => reset our location, as we're starting a new UTF-8 codepoint
            str_len = 0;
        }

        // append the current byte to the buffer
        str_buffer[str_len++] = *c; // ++ is post-increment, incremented after this line
        str_buffer[str_len] = '\0';

        // while the next character is a continuation byte, continue appending
        // but if there are too many of them, just stop to avoid overruning str_buffer size.
        if ((*(c+1) & 0xC0) == 0x80 && str_len < 4) {
            continue;
        }

        // ok c+1 is not a continuation byte, so we've read in a full codepoint
        int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);

        if (id != -1) {
            // we found this codepoint in vocab, add it as a token
            tokens[(*n_tokens)++] = id;
        } else {
            // byte_fallback encoding: just encode each byte as a token
            // +3 is here because the first 3 vocab elements are <unk>, <s>, </s>
            // so the individual bytes only start at index 3
            for (int i=0; i < str_len; i++) {
                tokens[(*n_tokens)++] = (unsigned char)str_buffer[i] + 3;
            }
        }
        str_len = 0; // protect against a sequence of stray UTF8 continuation bytes
    }

Code explanation:

This loop walks the raw UTF-8 byte sequence of the input string and converts each complete codepoint into its token id from the vocabulary.

Per the UTF-8 rules quoted from Wikipedia above, a character occupies 1 to 4 bytes depending on its Unicode code point. For example:

Characters U+0000 through U+007F take a single byte.
Characters U+0080 through U+07FF take two bytes: the first begins with 110, the second with 10.
Higher code points take three or four bytes.

(gdb) print *c   // the input is "hello"
$11 = 104 'h'

As an example, consider the UTF-8 byte sequence

63 61 C3 A9

where C3 A9 is the UTF-8 encoding of the French character 'é'. In this sequence:

When the pointer c is at C3, (*c & 0xC0) == 0xC0, so this is not a continuation byte but the leading byte of a multi-byte character.
When the pointer c is at A9, (*c & 0xC0) == 0x80, which marks it as a continuation byte.

    if ((*c & 0xC0) != 0x80) {
        // this byte must be either a leading byte (11...) or an ASCII char (0x...)
        // => reset our location, as we're starting a new UTF-8 codepoint
        str_len = 0;
    }
    // append the current byte to the buffer
    str_buffer[str_len++] = *c; // ++ is post-increment, incremented after this line
    str_buffer[str_len] = '\0';

For the input "hello": ('h' & 0xC0) != 0x80, so str_len is reset to 0 and then str_buffer[0] = 'h'.

(gdb) print str_buffer[0]
$5 = 104 'h'
(gdb) print str_buffer[1]
$6 = 0 '\000'

    if ((*(c+1) & 0xC0) == 0x80 && str_len < 4) {
        continue;
    }

(gdb) print *(c+1)
$25 = 101 'e'

The next byte 'e' is not a continuation byte, so str_buffer now holds a complete codepoint and execution falls through to the lookup:

    int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);

str_lookup checks whether the codepoint held in str_buffer has an entry in the sorted vocabulary (t->sorted_vocab) and, if so, returns its index (token id).

(gdb) print id
$1 = 29882
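str_lookup itself is a binary search over the sorted vocabulary using the same comparator; in llama.c it looks roughly like this:

int str_lookup(char *str, TokenIndex *sorted_vocab, int vocab_size) {
    // find an exact match for str in the sorted vocab, return its token id or -1 if not found
    TokenIndex tok = { .str = str }; // acts as the search key
    TokenIndex *res = bsearch(&tok, sorted_vocab, vocab_size, sizeof(TokenIndex), compare_tokens);
    return res != NULL ? res->id : -1;
}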

    if (id != -1) {
        // we found this codepoint in vocab, add it as a token
        tokens[(*n_tokens)++] = id;

(gdb) print (*n_tokens)++
$8 = 3

(gdb) print tokens[0]
$9 = 1

tokens[0] comes from: if (bos) tokens[(*n_tokens)++] = 1;

(gdb) print tokens[1]
$10 = 29871

tokens[1] comes from the dummy prefix:
(gdb) print dummy_prefix
$1 = 29871

(gdb) print tokens[2]
$11 = 29882

In other words, t->sorted_vocab is the sorted vocabulary and str_buffer holds one character of the prompt at a time: each prompt character is looked up in t->sorted_vocab to get its id, which is then appended to tokens.

    while (1) {
        float best_score = -1e10;
        int best_id = -1;
        int best_idx = -1;

        for (int i=0; i < (*n_tokens-1); i++) {
            // check if we can merge the pair (tokens[i], tokens[i+1])
            sprintf(str_buffer, "%s%s", t->vocab[tokens[i]], t->vocab[tokens[i+1]]);
            int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);
            if (id != -1 && t->vocab_scores[id] > best_score) {
                // this merge pair exists in vocab! record its score and position
                best_score = t->vocab_scores[id];
                best_id = id;
                best_idx = i;
            }
        }

Code explanation:

(gdb) print *n_tokens
$10 = 7

The seven tokens are BOS (1), the dummy prefix, then 'h', 'e', 'l', 'l', 'o':

(gdb) p *tokens@7
$12 = {1, 29871, 29882, 29872, 29880, 29880, 29877}

(gdb) p *t->sorted_vocab@12
$17 = {{str = 0x5591f27cdf60 "\n\n", id = 2}, {str = 0x5591f27cdf40 "\n\n", id = 1}, {str = 0x5591f28b85d0 "\r", id = 30004}, {
  str = 0x5591f28b7530 " ", id = 29871}, {str = 0x5591f2802ba0 " \r", id = 6756}, {str = 0x5591f27cff80 " ", id = 259}, {str = 0x5591f27db0e0 " ",
  id = 1678}, {str = 0x5591f27d00a0 " ", id = 268}, {str = 0x5591f27d1360 " ", id = 418}, {str = 0x5591f27d2280 " ", id = 539}, {
  str = 0x5591f27f2b60 " ", id = 4706}, {str = 0x5591f27d05a0 " ", id = 308}}

(gdb) print *t->vocab@8
$19 = {0x5591f27cdf20 "", 0x5591f27cdf40 "\n\n", 0x5591f27cdf60 "\n\n", 0x5591f27cdf80 "<0x00>", 0x5591f27cdfa0 "<0x01>", 0x5591f27cdfc0 "<0x02>", 0x5591f27cdfe0 "<0x03>",
  0x5591f27ce000 "<0x04>"}

    sprintf(str_buffer, "%s%s", t->vocab[tokens[i]], t->vocab[tokens[i+1]]);

This takes the strings of two consecutive tokens (positions i and i+1 in the tokens array), fetches them from the vocabulary, concatenates them, and stores the result in str_buffer.

str_lookup is then called with the concatenated string to check whether the sorted vocabulary (sorted_vocab) contains a matching entry. If it does, and that entry's score in vocab_scores is higher than the best score seen so far, the best score, best id, and best index (the position i of the token pair) are updated:

    int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);
    if (id != -1 && t->vocab_scores[id] > best_score) {
        // this merge pair exists in vocab! record its score and position
        best_score = t->vocab_scores[id];
        best_id = id;
        best_idx = i;
    }

The point of this loop is to find the best token pair to merge: consecutive tokens whose concatenation is itself a vocabulary entry are candidates, and the candidate with the highest vocabulary score wins. Repeating this merge step produces a more compact encoding that follows the structure of the predefined vocabulary.

Why are the two adjacent tokens' strings taken from the unsorted t->vocab rather than the sorted table?

Qwen's explanation: the lookup uses t->sorted_vocab because it is ordered by string, which is what makes the search and comparison fast; but tokens[] stores original token ids, and only the unsorted t->vocab is indexed by those ids, so t->vocab[tokens[i]] is the direct way to recover each token's string.

        if (best_idx == -1) {
            break; // we couldn't find any more pairs to merge, so we're done
        }

        // merge the consecutive pair (best_idx, best_idx+1) into new token best_id
        tokens[best_idx] = best_id;
        // delete token at position best_idx+1, shift the entire sequence back 1
        for (int i = best_idx+1; i < (*n_tokens-1); i++) {
            tokens[i] = tokens[i+1];
        }
        (*n_tokens)--; // token length decreased
    }

    // add optional EOS (=2) token, if desired
    if (eos) tokens[(*n_tokens)++] = 2;

    free(str_buffer);
}

Each iteration finds the best mergeable token pair, merges it, and shifts the remaining tokens down; the loop stops once no adjacent pair forms a vocabulary entry.
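For context, the caller generate() sets this up roughly as follows before running the forward passes:

    int num_prompt_tokens = 0;
    int* prompt_tokens = (int*)malloc((strlen(prompt) + 3) * sizeof(int)); // upper bound: one token per byte, +3 headroom for BOS/EOS/'\0'
    encode(tokenizer, prompt, 1, 0, prompt_tokens, &num_prompt_tokens);
    // per the gdb session above, for prompt = "hello" the array holds
    // {1, 29871, 29882, 29872, 29880, 29880, 29877} partway through encode
    // (BOS, dummy prefix " ", one id per character); the merge loop then collapses
    // adjacent pairs that form higher-scoring vocab entries before encode returns.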
