[Tutorial] AI Fundamentals: A Model That Learns Word Embeddings

海豚大数据实验室 2019-04-21 16:47:16

This article is about 1,071 characters long; suggested reading time: 6 minutes.

In this article we will build a model that learns word embeddings, i.e. vector representations of words, which are a powerful way to represent words for all kinds of NLP tasks. We will not use an RNN for this task, but the later tasks in this series will rely on the content and methods introduced here.

Why represent words as vectors? The most straightforward way to feed words into a learning system is as one-hot encodings: a vector whose length equals the size of the vocabulary, with a 1 at the position corresponding to the word and 0 everywhere else. This approach has two problems. First, for realistic applications the vectors become very long. Second, one-hot encodings say nothing about the semantic relationships between different words.
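As a tiny illustration of both problems (the three-word vocabulary below is made up purely for demonstration):

import numpy as np

# Made-up three-word vocabulary, purely to illustrate one-hot encoding.
vocabulary = ['cat', 'dog', 'fish']
indices = {word: i for i, word in enumerate(vocabulary)}

def one_hot(word):
    vector = np.zeros(len(vocabulary))
    vector[indices[word]] = 1.0
    return vector

print(one_hot('dog'))  # [0. 1. 0.]
# The vector length grows with the vocabulary, and every pair of distinct
# words is equally far apart, so no semantic similarity is captured.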

As a solution to the semantic-relatedness problem, the idea of representing words by their co-occurrences has been around for a long time. The basic approach is to walk over a large corpus of text and, for each word, count the surrounding words within a certain distance (for example, five positions). Each word is then represented by the normalized counts (occurrence probabilities) of its nearby words. The idea behind this is that words used in similar contexts are semantically similar. One can then apply PCA or a similar method to reduce the dimensionality of the occurrence vectors and obtain denser representations. While this approach performs well, it requires us to keep track of the co-occurrence matrix of all words.
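As a rough sketch of this counting approach (not part of the model we build below), the following snippet builds a small co-occurrence matrix and compresses it with PCA; the toy corpus is made up, and scikit-learn's PCA is just one possible choice for the dimensionality reduction:

import numpy as np
from sklearn.decomposition import PCA

# Made-up toy corpus; in practice this would be a large text collection.
corpus = [['the', 'cat', 'sat', 'on', 'the', 'mat'],
          ['the', 'dog', 'sat', 'on', 'the', 'rug']]
window = 2

vocabulary = sorted({word for sentence in corpus for word in sentence})
index = {word: i for i, word in enumerate(vocabulary)}

# Count how often each word occurs within `window` positions of another word.
counts = np.zeros((len(vocabulary), len(vocabulary)))
for sentence in corpus:
    for i, word in enumerate(sentence):
        for j in range(max(0, i - window), min(len(sentence), i + window + 1)):
            if i != j:
                counts[index[word], index[sentence[j]]] += 1

# Normalize the rows to occurrence probabilities, then compress them with PCA.
probabilities = counts / counts.sum(axis=1, keepdims=True)
dense = PCA(n_components=2).fit_transform(probabilities)
print(dense.shape)  # (vocabulary size, 2)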

In 2013, Mikolov et al. proposed a practical and efficient way of computing word representations from context. Their skip-gram model starts from random representations and uses a simple classifier that tries to predict a context word from the current word. The errors are propagated both through the classifier weights and through the word representations, and we adjust both to reduce the prediction error. It turns out that training this model on a large corpus makes the representation vectors approximate compressed co-occurrence vectors. Below, we implement the skip-gram model with TensorFlow.
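To make the skip-gram training data concrete, every word is paired with the words inside its context window; a minimal illustration with a made-up sentence and a context window of one word:

# Made-up sentence illustrating the (current word, context word) pairs that
# the skip-gram classifier is trained on.
sentence = ['machine', 'learning', 'with', 'tensorflow']
context = 1

pairs = []
for i, current in enumerate(sentence):
    for j in range(max(0, i - context), min(len(sentence), i + context + 1)):
        if i != j:
            pairs.append((current, sentence[j]))

print(pairs)
# [('machine', 'learning'), ('learning', 'machine'), ('learning', 'with'),
#  ('with', 'learning'), ('with', 'tensorflow'), ('tensorflow', 'with')]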

Preparing the Wikipedia Corpus

In this example we will work with a dump of the English Wikipedia; the same code works for other languages as well. An overview of all available dump files can be found at the Wikipedia download site: https://dumps.wikimedia.org/backup-index.html

import bz2
import collections
import os
import re

from lxml import etree

from helpers import download


class Wikipedia:

    TOKEN_REGEX = re.compile(r'[A-Za-z]+|[!?.:,()]')

    def __init__(self, url, cache_dir, vocabulary_size=10000):
        self._cache_dir = os.path.expanduser(cache_dir)
        self._pages_path = os.path.join(self._cache_dir, 'pages.bz2')
        self._vocabulary_path = os.path.join(self._cache_dir, 'vocabulary.bz2')
        if not os.path.isfile(self._pages_path):
            print('Read pages')
            self._read_pages(url)
        if not os.path.isfile(self._vocabulary_path):
            print('Build vocabulary')
            self._build_vocabulary(vocabulary_size)
        with bz2.open(self._vocabulary_path, 'rt') as vocabulary:
            print('Read vocabulary')
            self._vocabulary = [x.strip() for x in vocabulary]
        self._indices = {x: i for i, x in enumerate(self._vocabulary)}

    def __iter__(self):
        """Iterate over the pages, each represented as a list of word indices."""
        with bz2.open(self._pages_path, 'rt') as pages:
            for page in pages:
                words = page.strip().split()
                words = [self.encode(x) for x in words]
                yield words

    @property
    def vocabulary_size(self):
        return len(self._vocabulary)

    def encode(self, word):
        """Map a word to its vocabulary index; unknown words map to <unk> at index 0."""
        return self._indices.get(word, 0)

    def decode(self, index):
        return self._vocabulary[index]

    def _read_pages(self, url):
        """Extract the words from a Wikipedia dump and write them to the pages
        file, one page of space-separated words per line."""
        wikipedia_path = download(url, self._cache_dir)
        with bz2.open(wikipedia_path) as wikipedia, \
                bz2.open(self._pages_path, 'wt') as pages:
            for _, element in etree.iterparse(wikipedia, tag='{*}page'):
                if element.find('./{*}redirect') is not None:
                    continue
                page = element.findtext('./{*}revision/{*}text')
                words = self._tokenize(page)
                pages.write(' '.join(words) + '\n')
                element.clear()

    def _build_vocabulary(self, vocabulary_size):
        """Count the words in the pages file and keep the most common ones,
        reserving index 0 for the <unk> token."""
        counter = collections.Counter()
        with bz2.open(self._pages_path, 'rt') as pages:
            for page in pages:
                words = page.strip().split()
                counter.update(words)
        common = ['<unk>'] + [x[0] for x in counter.most_common(vocabulary_size - 1)]
        with bz2.open(self._vocabulary_path, 'wt') as vocabulary:
            for word in common:
                vocabulary.write(word + '\n')

    @classmethod
    def _tokenize(cls, page):
        words = cls.TOKEN_REGEX.findall(page)
        words = [x.lower() for x in words]
        return words
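The Wikipedia class above imports download from a helpers module, and the model and training script below additionally use lazy_property and AttrDict from the same module, none of which are listed in this article. A minimal sketch of what such a helpers module might look like (the exact implementation here is an assumption):

# helpers.py - a sketch of the helper module used throughout; the original
# implementation is not shown in this article, so the details are assumptions.
import functools
import os
import urllib.request


def download(url, cache_dir):
    """Download a file into cache_dir (if not already cached) and return its path."""
    os.makedirs(cache_dir, exist_ok=True)
    path = os.path.join(cache_dir, url.split('/')[-1])
    if not os.path.isfile(path):
        urllib.request.urlretrieve(url, path)
    return path


def lazy_property(function):
    """Evaluate a property once and cache the result, so that each TensorFlow
    graph node is only created the first time the property is accessed."""
    attribute = '_lazy_' + function.__name__

    @property
    @functools.wraps(function)
    def wrapper(self):
        if not hasattr(self, attribute):
            setattr(self, attribute, function(self))
        return getattr(self, attribute)
    return wrapper


class AttrDict(dict):
    """Dictionary whose entries can also be accessed as attributes."""

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)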

import tensorflow as tf
import numpy as np

from helpers import lazy_property


class EmbeddingModel:

    def __init__(self, data, target, params):
        self.data = data
        self.target = target
        self.params = params
        # Touch the lazy properties so the graph nodes are created up front.
        self.embeddings
        self.cost
        self.optimize

    @lazy_property
    def embeddings(self):
        # Word representations, initialized uniformly at random in [-1, 1].
        initial = tf.random_uniform(
            [self.params.vocabulary_size, self.params.embedding_size],
            -1.0, 1.0)
        return tf.Variable(initial)

    @lazy_property
    def optimize(self):
        optimizer = tf.train.MomentumOptimizer(
            self.params.learning_rate, self.params.momentum)
        return optimizer.minimize(self.cost)

    @lazy_property
    def cost(self):
        # Noise-contrastive estimation (NCE) loss: rather than a full softmax
        # over the vocabulary, only a few negative examples are sampled.
        embedded = tf.nn.embedding_lookup(self.embeddings, self.data)
        weight = tf.Variable(tf.truncated_normal(
            [self.params.vocabulary_size, self.params.embedding_size],
            stddev=1.0 / self.params.embedding_size ** 0.5))
        bias = tf.Variable(tf.zeros([self.params.vocabulary_size]))
        target = tf.expand_dims(self.target, 1)
        return tf.reduce_mean(tf.nn.nce_loss(
            weight, bias, embedded, target,
            self.params.contrastive_examples,
            self.params.vocabulary_size))
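The training script below also imports skipgrams and batched, which are not listed in this article either. Presumably, skipgrams walks over the encoded pages and yields (current word, context word) index pairs, and batched groups those pairs into NumPy arrays of a fixed size; a possible sketch under those assumptions:

# skipgrams.py / batched.py - sketches of the two data helpers imported below;
# the originals are not shown in this article, so the details are assumptions.
import random

import numpy as np


def skipgrams(pages, max_context):
    """For every word of every page, choose a random context size up to
    max_context and yield (current word, context word) index pairs."""
    for words in pages:
        for index, current in enumerate(words):
            context = random.randint(1, max_context)
            for target in words[max(0, index - context): index]:
                yield current, target
            for target in words[index + 1: index + context + 1]:
                yield current, target


def batched(iterator, batch_size):
    """Group the pairs coming out of the iterator into NumPy arrays of
    batch_size current words and batch_size context words."""
    iterator = iter(iterator)
    while True:
        data = np.zeros(batch_size, dtype=np.int32)
        target = np.zeros(batch_size, dtype=np.int32)
        try:
            for index in range(batch_size):
                data[index], target[index] = next(iterator)
        except StopIteration:
            return
        yield data, target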

import collections

import tensorflow as tf
import numpy as np

from batched import batched
from EmbeddingModel import EmbeddingModel
from skipgrams import skipgrams
from Wikipedia import Wikipedia
from helpers import AttrDict

WIKI_DOWNLOAD_DIR = './wikipedia'

params = AttrDict(
    vocabulary_size=10000,
    max_context=10,
    embedding_size=200,
    contrastive_examples=100,
    learning_rate=0.5,
    momentum=0.5,
    batch_size=1000,
)

data = tf.placeholder(tf.int32, [None])
target = tf.placeholder(tf.int32, [None])
model = EmbeddingModel(data, target, params)

corpus = Wikipedia(
    'https://dumps.wikimedia.org/enwiki/20160501/'
    'enwiki-20160501-pages-meta-current1.xml-p000000010p000030303.bz2',
    WIKI_DOWNLOAD_DIR,
    params.vocabulary_size)
examples = skipgrams(corpus, params.max_context)
batches = batched(examples, params.batch_size)

sess = tf.Session()
sess.run(tf.initialize_all_variables())
# Report a running average of the cost over the last 100 batches.
average = collections.deque(maxlen=100)
for index, batch in enumerate(batches):
    feed_dict = {data: batch[0], target: batch[1]}
    cost, _ = sess.run([model.cost, model.optimize], feed_dict)
    average.append(cost)
    print('{}: {:5.1f}'.format(index + 1, sum(average) / len(average)))
    if index > 100000:
        break

embeddings = sess.run(model.embeddings)
np.save(WIKI_DOWNLOAD_DIR + '/embeddings.npy', embeddings)
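Once the embeddings are saved, a quick sanity check (not part of the original listing) is to load the matrix and look up a word's nearest neighbours by cosine similarity; this sketch assumes the corpus object from above is still around to map between words and indices:

# Quick check: nearest neighbours by cosine similarity; 'three' is just an
# example query word.
embeddings = np.load(WIKI_DOWNLOAD_DIR + '/embeddings.npy')
normalized = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)

def neighbours(word, count=5):
    similarity = normalized @ normalized[corpus.encode(word)]
    best = np.argsort(-similarity)[1:count + 1]  # skip the query word itself
    return [corpus.decode(int(i)) for i in best]

print(neighbours('three'))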

And we're done!
