def predict(x, temp):
    """Sample a vocabulary index from temperature-scaled logits.

    Args:
        x: 1-D tensor of unnormalized scores over the vocabulary
           (assumed length == vocab_len -- TODO confirm against model).
        temp: softmax temperature; >1 flattens the distribution,
              <1 sharpens it.

    Returns:
        The sampled vocabulary index (numpy integer scalar).
    """
    probs = F.softmax(x / temp, dim=0)
    # Move to CPU / numpy so np.random.choice can consume the probabilities.
    probs = np.squeeze(probs.detach().cpu().numpy())
    ind = np.random.choice(vocab_len, 1, p=probs)
    return ind[0]


# Seed phrase; the model conditions on a sliding window of the last
# 5 word embeddings (each presumably 128-dim -- see the .view below).
generated_text = ['there', 'is', 'no', 'one', 'love']
curr_len = 0
embeds = []
is_end = word_to_int[';']  # sentinel token index that terminates generation
qt_gen.eval()  # inference mode: disables dropout/batch-norm updates

for word in generated_text:
    embeds.append(emb[word_to_int[word]])

# Inference only (the code detaches outputs and never backprops),
# so disable autograd bookkeeping for speed and memory.
with torch.no_grad():
    while curr_len < 50:  # cap generation at 50 new words
        curr_len += 1
        # Shape: (batch=1, window=5, embedding_dim=128).
        input_tensor = torch.Tensor(embeds).view(1, 5, 128).float().to(device)
        h_h, h_c = qt_gen.zero_states(1)
        output, (h_h, h_c) = qt_gen(input_tensor, (h_h, h_c))
        word_ind = predict(output[-1], 1.6)
        # BUG FIX: the original line
        #     embeds[0][:4].tolist().extend(emb[word_ind])
        # extended a throwaway temporary list, so the input window never
        # advanced and the model was fed the seed window on every step.
        # Slide the window: drop the oldest embedding, append the new one.
        embeds = embeds[1:] + [emb[word_ind]]
        generated_text.append(int_to_word[word_ind])
        if word_ind == is_end:
            break

print(' '.join(generated_text))
torch.cuda.empty_cache()  # release cached GPU memory after generation