# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reuters topic classification dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import json

import numpy as np

from tensorflow.python.keras.preprocessing.sequence import _remove_long_seq
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export


@keras_export('keras.datasets.reuters.load_data')
def load_data(path='reuters.npz',
              num_words=None,
              skip_top=0,
              maxlen=None,
              test_split=0.2,
              seed=113,
              start_char=1,
              oov_char=2,
              index_from=3,
              **kwargs):
  """Loads the Reuters newswire classification dataset.

  This is a dataset of 11,228 newswires from Reuters, labeled over 46 topics.

  This was originally generated by parsing and preprocessing the classic
  Reuters-21578 dataset, but the preprocessing code is no longer packaged
  with Keras. See this
  [GitHub discussion](https://github.com/keras-team/keras/issues/12072)
  for more info.

  Each newswire is encoded as a list of word indexes (integers).
  For convenience, words are indexed by overall frequency in the dataset,
  so that for instance the integer "3" encodes the 3rd most frequent word in
  the data. This allows for quick filtering operations such as:
  "only consider the top 10,000 most common words, but eliminate the top 20
  most common words".

  As a convention, "0" does not stand for a specific word, but instead is used
  to encode any unknown word.

  Args:
    path: where to cache the data (relative to `~/.keras/datasets`).
    num_words: integer or None. Words are ranked by how often they occur
        (in the training set) and only the `num_words` most frequent words
        are kept. Any less frequent word will appear as `oov_char` value
        in the sequence data. Defaults to None, in which case all words
        are kept.
    skip_top: skip the top N most frequently occurring words
        (which may not be informative). These words will appear as
        `oov_char` value in the dataset. Defaults to 0, so no words are
        skipped.
    maxlen: int or None. Maximum sequence length.
        Any longer sequence will be truncated. Defaults to None, which
        means no truncation.
    test_split: Float between 0 and 1. Fraction of the dataset to be used
        as test data. Defaults to 0.2, meaning 20% of the dataset is used
        as test data.
    seed: int. Seed for reproducible data shuffling.
    start_char: int. The start of a sequence will be marked with this
        character. Defaults to 1 because 0 is usually the padding character.
    oov_char: int. The out-of-vocabulary character.
        Words that were cut out because of the `num_words` or
        `skip_top` limits will be replaced with this character.
    index_from: int. Index actual words with this index and higher.
    **kwargs: Used for backwards compatibility.

  Returns:
    Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.

    **x_train, x_test**: lists of sequences, which are lists of indexes
      (integers). If the `num_words` argument was specified, the maximum
      possible index value is `num_words - 1`. If the `maxlen` argument was
      specified, the largest possible sequence length is `maxlen`.

    **y_train, y_test**: lists of integer labels (between 0 and 45).

  Note: The 'out of vocabulary' character is only used for
  words that were present in the training set but are not included
  because they did not make the `num_words` cut.
  Words that were not seen in the training set but are in the test set
  have simply been skipped.
  """
  # Legacy support
  if 'nb_words' in kwargs:
    logging.warning('The `nb_words` argument in `load_data` '
                    'has been renamed `num_words`.')
    num_words = kwargs.pop('nb_words')
  if kwargs:
    raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))

  origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'
  path = get_file(
      path,
      origin=origin_folder + 'reuters.npz',
      file_hash=
      'd6586e694ee56d7a4e65172e12b3e987c03096cb01eab99753921ef915959916')
  with np.load(path, allow_pickle=True) as f:
    xs, labels = f['x'], f['y']

  # Shuffle newswires and labels in unison with a fixed seed so the
  # train/test split below is reproducible.
  rng = np.random.RandomState(seed)
  indices = np.arange(len(xs))
  rng.shuffle(indices)
  xs = xs[indices]
  labels = labels[indices]

  if start_char is not None:
    xs = [[start_char] + [w + index_from for w in x] for x in xs]
  elif index_from:
    xs = [[w + index_from for w in x] for x in xs]

  if maxlen:
    xs, labels = _remove_long_seq(maxlen, xs, labels)

  if not num_words:
    num_words = max(max(x) for x in xs)

  # by convention, use 2 as OOV word
  # reserve 'index_from' (=3 by default) characters:
  # 0 (padding), 1 (start), 2 (OOV)
  if oov_char is not None:
    xs = [[w if skip_top <= w < num_words else oov_char for w in x] for x in xs]
  else:
    xs = [[w for w in x if skip_top <= w < num_words] for x in xs]

  idx = int(len(xs) * (1 - test_split))
  x_train, y_train = np.array(xs[:idx]), np.array(labels[:idx])
  x_test, y_test = np.array(xs[idx:]), np.array(labels[idx:])

  return (x_train, y_train), (x_test, y_test)
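

# The word index returned by `get_word_index` below stores raw frequency
# ranks; it does not include the `start_char`/`oov_char`/`index_from` offsets
# that `load_data` applies. The helper below is an illustrative sketch only
# (the name `_decode_newswire_example` is not part of the Keras API), showing
# one way to invert the encoding under the default arguments.
def _decode_newswire_example(encoded, index_from=3, oov_token='<UNK>'):
  """Illustrative sketch: turn one `load_data` sequence back into words."""
  word_index = get_word_index()
  # Invert word -> rank, shifting each rank up by `index_from` to match the
  # offset that `load_data` adds to every real word index.
  index_word = {rank + index_from: word for word, rank in word_index.items()}
  # Indices below `index_from` are reserved markers (0: padding, 1: start,
  # 2: OOV under the default arguments); they fall back to `oov_token`.
  return ' '.join(index_word.get(i, oov_token) for i in encoded)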


@keras_export('keras.datasets.reuters.get_word_index')
def get_word_index(path='reuters_word_index.json'):
  """Retrieves a dict mapping words to their index in the Reuters dataset.

  Args:
    path: where to cache the data (relative to `~/.keras/datasets`).

  Returns:
    The word index dictionary. Keys are word strings, values are their index.
  """
  origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'
  path = get_file(
      path,
      origin=origin_folder + 'reuters_word_index.json',
      file_hash='4d44cc38712099c9e383dc6e5f11a921')
  with open(path) as f:
    return json.load(f)
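

if __name__ == '__main__':
  # Minimal usage sketch, not part of the library: loads the dataset
  # (downloading it to `~/.keras/datasets` on first use) and prints a few
  # basic statistics. `num_words=10000` keeps only the 10,000 most frequent
  # words; everything rarer is replaced by the default OOV index 2.
  (x_train, y_train), (x_test, y_test) = load_data(num_words=10000)
  print('Train sequences:', len(x_train))
  print('Test sequences:', len(x_test))
  print('Number of topics:', int(np.max(y_train)) + 1)
  print('Length of first newswire:', len(x_train[0]))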