How the word2vec algorithm works (a plain-Python implementation, no open-source packages)

I have read plenty of articles explaining how the word2vec algorithm works. I could follow them, but the understanding never felt solid.

Below is the word2vec algorithm implemented directly in Python: the skip-gram model with a full-softmax output layer, trained one (center, context) pair at a time by stochastic gradient descent. It is short and self-contained; read through it once and the algorithm should be clear.

```python
import numpy as np

def tokenize(text):
    return text.lower().split()

def generate_word_pairs(sentences, window_size):
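    # Collect (center_word, context_word) skip-gram training pairs within the context window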
    word_pairs = []
    for sentence in sentences:
        for i, center_word in enumerate(sentence):
            for j in range(i - window_size, i + window_size + 1):
                if j >= 0 and j < len(sentence) and j != i:
                    context_word = sentence[j]
                    word_pairs.append((center_word, context_word))
    return word_pairs

def create_word_index(sentences):
    # Map each unique word to an integer index (sorted so the mapping is reproducible across runs)
    word_set = sorted(set(word for sentence in sentences for word in sentence))
    return {word: i for i, word in enumerate(word_set)}


def one_hot_encoding(word, word_index):
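    # Represent a word as a one-hot vector over the vocabulary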
    one_hot = np.zeros(len(word_index))
    one_hot[word_index[word]] = 1
    return one_hot

def train_word2vec(sentences, vector_size, window_size, learning_rate, epochs):
    word_index = create_word_index(sentences)
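    # W1: input embeddings (vocab_size x vector_size); W2: output-layer weights (vector_size x vocab_size)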
    W1 = np.random.rand(len(word_index), vector_size)
    W2 = np.random.rand(vector_size, len(word_index))

    word_pairs = generate_word_pairs(sentences, window_size)

    for epoch in range(epochs):
        loss = 0
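        # One SGD update per (center, context) pair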
        for center_word, context_word in word_pairs:
            center_word_encoded = one_hot_encoding(center_word, word_index)
            context_word_encoded = one_hot_encoding(context_word, word_index)

            # Forward pass: the hidden layer is simply the center word's row of W1,
            # and the output layer scores every word in the vocabulary
            hidden_layer = np.dot(center_word_encoded, W1)
            output_layer = np.dot(hidden_layer, W2)

            # Softmax over the vocabulary (scores shifted by their max for numerical stability)
            scores = output_layer - np.max(output_layer)
            exp_output = np.exp(scores)
            softmax_output = exp_output / np.sum(exp_output)

            # Gradient of the cross-entropy loss with respect to the output scores
            error = softmax_output - context_word_encoded

            # Backpropagate into both weight matrices and apply a plain SGD step
            dW2 = np.outer(hidden_layer, error)
            dW1 = np.outer(center_word_encoded, np.dot(W2, error))

            W1 -= learning_rate * dW1
            W2 -= learning_rate * dW2

            # Cross-entropy loss: -log softmax probability of the observed context word
            loss += -np.sum(scores * context_word_encoded) + np.log(np.sum(exp_output))

        print(f"Epoch: {epoch + 1}, Loss: {loss}")

    return W1, word_index

sentences = [
    tokenize("This is a sample sentence"),
    tokenize("Another example sentence"),
    tokenize("One more example")
]

vector_size = 100
window_size = 2
learning_rate = 0.01
epochs = 100

W1, word_index = train_word2vec(sentences, vector_size, window_size, learning_rate, epochs)

for word, index in word_index.items():
    print(f"{word}: {W1[index]}")
```
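Each row of the returned W1 is the learned vector of one word. As a quick way to inspect the result, here is a small sketch (an addition for illustration; the helper name most_similar is made up, not part of the listing above) that ranks the remaining words by cosine similarity to a query word:

```python
def most_similar(query, W1, word_index, top_n=3):
    # Rank all other words by cosine similarity to the query word's vector
    inv_index = {i: w for w, i in word_index.items()}
    q = W1[word_index[query]]
    sims = W1 @ q / (np.linalg.norm(W1, axis=1) * np.linalg.norm(q) + 1e-10)
    ranked = np.argsort(-sims)
    return [(inv_index[i], float(sims[i])) for i in ranked if inv_index[i] != query][:top_n]

print(most_similar("example", W1, word_index))
```

On a toy corpus of three short sentences the neighbours will not mean much, but on real text this is the usual sanity check that related words end up with nearby vectors.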