Permalink
Browse files

Restored some copyright attributions that were accidentally removed. (#8688)

* Revert "add"

This reverts commit b0fe391419bb49c7484ba04ee7171c42ae0e46d0.

* Adding Back the License Headers
  • Loading branch information...
1 parent a499f89 commit d2a856a3a2abb4e72edc301b8b821f0b75f30722 @KellenSunderland KellenSunderland committed with cjolivier01 Nov 19, 2017
Showing 391 changed files with 409 additions and 31 deletions.
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright 2015 by Contributors.
* \brief Mininum DMLC library Amalgamation, used for easy plugin of dmlc lib.
* Normally this is not needed.
*/
@@ -14,8 +14,8 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-#
-#----
+
+
# Copyright (c) 2014 Thomas Heller
# Copyright (c) 2007-2012 Hartmut Kaiser
# Copyright (c) 2010-2011 Matt Anderson
@@ -24,21 +24,21 @@
#----
# Distributed under the Boost Software License, Version 1.0.
# Boost Software License - Version 1.0 - August 17th, 2003
-#
+#
# Permission is hereby granted, free of charge, to any person or organization
# obtaining a copy of the software and accompanying documentation covered by
# this license (the "Software") to use, reproduce, display, distribute,
# execute, and transmit the Software, and to prepare derivative works of the
# Software, and to permit third-parties to whom the Software is furnished to
# do so, all subject to the following:
-#
+#
# The copyright notices in the Software and this entire statement, including
# the above license grant, this restriction and the following disclaimer,
# must be included in all copies of the Software, in whole or in part, and
# all derivative works of the Software, unless such copies or derivative
# works are solely in the form of machine-executable object code generated by
# a source language processor.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2016 by Contributors
* \file MxNetCpp.h
* \brief meta include file for mxnet.cpp
* \author Chuntao Hong, Zhang Chen
@@ -18,6 +18,7 @@
*/
/*!
+* Copyright (c) 2016 by Contributors
* \file base.h
* \brief base definitions for mxnetcpp
* \author Chuntao Hong, Zhang Chen
@@ -18,6 +18,7 @@
*/
/*!
+* Copyright (c) 2016 by Contributors
* \file executor.h
* \brief executor definition
* \author Chuntao Hong, Zhang Chen
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2016 by Contributors
* \file initializer.h
* \brief random initializer
* \author Zhang Chen
@@ -18,6 +18,7 @@
*/
/*!
+* Copyright (c) 2016 by Contributors
* \file operator.h
* \brief definition of io, such as DataIter
* \author Zhang Chen
@@ -18,6 +18,7 @@
*/
/*!
+* Copyright (c) 2016 by Contributors
* \file kvstore.h
* \brief definition of kvstore
* \author Chuntao Hong
@@ -18,6 +18,7 @@
*/
/*!
+* Copyright (c) 2017 by Contributors
* \file lr_scheduler.h
* \brief Scheduling learning rate
*/
@@ -18,6 +18,7 @@
*/
/*!
+* Copyright (c) 2016 by Contributors
* \file base.h
* \brief metrics defined
* \author Zhang Chen
@@ -18,6 +18,7 @@
*/
/*!
+* Copyright (c) 2016 by Contributors
* \file model.h
* \brief MXNET.cpp model module
* \author Zhang Chen
@@ -18,6 +18,7 @@
*/
/*!
+* Copyright (c) 2017 by Contributors
* \file monitor.h
* \brief monitor definition
* \author Xin Li
@@ -18,6 +18,7 @@
*/
/*!
+* Copyright (c) 2016 by Contributors
* \file ndarray.h
* \brief definition of ndarray
* \author Chuntao Hong, Zhang Chen
@@ -18,6 +18,7 @@
*/
/*!
+* Copyright (c) 2016 by Contributors
* \file op_map.h
* \brief definition of OpMap
* \author Chuntao Hong
@@ -18,6 +18,7 @@
*/
/*!
+* Copyright (c) 2016 by Contributors
* \file op_suppl.h
* \brief A supplement and amendment of the operators from op.h
* \author Zhang Chen, zhubuntu, Xin Li
@@ -18,6 +18,7 @@
*/
/*!
+* Copyright (c) 2017 by Contributors
* \file op_util.h
* \brief operator helper functions
* \author Chris Olivier
@@ -18,6 +18,7 @@
*/
/*!
+* Copyright (c) 2016 by Contributors
* \file operator.h
* \brief definition of operator
* \author Chuntao Hong, Zhang Chen
@@ -18,6 +18,7 @@
*/
/*!
+* Copyright (c) 2016 by Contributors
* \file optimizer.h
* \brief definition of optimizer
* \author Chuntao Hong, Zhang Chen
@@ -18,6 +18,7 @@
*/
/*!
+* Copyright (c) 2016 by Contributors
* \file shape.h
* \brief definition of shape
* \author Chuntao Hong, Zhang Chen
@@ -18,6 +18,7 @@
*/
/*!
+* Copyright (c) 2016 by Contributors
* \file symbol.h
* \brief definition of symbol
* \author Chuntao Hong, Zhang Chen
View
@@ -27,7 +27,7 @@ echo "oracle-java8-installer shared/accepted-oracle-license-v1-1 select true" |
apt-get install -y oracle-java8-installer
apt-get install -y oracle-java8-set-default
-apt-get install -y maven
+apt-get install -y maven
wget http://downloads.lightbend.com/scala/2.11.8/scala-2.11.8.deb
dpkg -i scala-2.11.8.deb
@@ -29,10 +29,10 @@ def get_feature(internel_layer, layers, filters, batch_norm = False, **kwargs):
for j in range(num):
internel_layer = mx.sym.Convolution(data = internel_layer, kernel=(3, 3), pad=(1, 1), num_filter=filters[i], name="conv%s_%s" %(i + 1, j + 1))
if batch_norm:
- internel_layer = mx.symbol.BatchNorm(data=internel_layer, name="bn%s_%s" %(i + 1, j + 1))
+ internel_layer = mx.symbol.BatchNorm(data=internel_layer, name="bn%s_%s" %(i + 1, j + 1))
internel_layer = mx.sym.Activation(data=internel_layer, act_type="relu", name="relu%s_%s" %(i + 1, j + 1))
internel_layer = mx.sym.Pooling(data=internel_layer, pool_type="max", kernel=(2, 2), stride=(2,2), name="pool%s" %(i + 1))
- return internel_layer
+ return internel_layer
def get_classifier(input_data, num_classes, **kwargs):
flatten = mx.sym.Flatten(data=input_data, name="flatten")
@@ -43,7 +43,7 @@ def get_classifier(input_data, num_classes, **kwargs):
relu7 = mx.sym.Activation(data=fc7, act_type="relu", name="relu7")
drop7 = mx.sym.Dropout(data=relu7, p=0.5, name="drop7")
fc8 = mx.sym.FullyConnected(data=drop7, num_hidden=num_classes, name="fc8")
- return fc8
+ return fc8
def get_symbol(num_classes, num_layers=11, batch_norm=False, dtype='float32', **kwargs):
"""
@@ -54,23 +54,23 @@ def get_symbol(num_classes, num_layers=11, batch_norm=False, dtype='float32', **
num_layers : int
Number of layers for the variant of densenet. Options are 11, 13, 16, 19.
batch_norm : bool, default False
- Use batch normalization.
+ Use batch normalization.
dtype: str, float32 or float16
- Data precision.
+ Data precision.
"""
vgg_spec = {11: ([1, 1, 2, 2, 2], [64, 128, 256, 512, 512]),
13: ([2, 2, 2, 2, 2], [64, 128, 256, 512, 512]),
16: ([2, 2, 3, 3, 3], [64, 128, 256, 512, 512]),
19: ([2, 2, 4, 4, 4], [64, 128, 256, 512, 512])}
- if not vgg_spec.has_key(num_layers):
+ if not vgg_spec.has_key(num_layers):
raise ValueError("Invalide num_layers {}. Possible choices are 11,13,16,19.".format(num_layers))
- layers, filters = vgg_spec[num_layers]
+ layers, filters = vgg_spec[num_layers]
data = mx.sym.Variable(name="data")
if dtype == 'float16':
data = mx.sym.Cast(data=data, dtype=np.float16)
feature = get_feature(data, layers, filters, batch_norm)
classifier = get_classifier(feature, num_classes)
if dtype == 'float16':
- classifier = mx.sym.Cast(data=classifier, dtype=np.float32)
+ classifier = mx.sym.Cast(data=classifier, dtype=np.float32)
symbol = mx.sym.SoftmaxOutput(data=classifier, name='softmax')
return symbol
View
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2015 by Contributors
* \file base.h
* \brief configuation of mxnet as well as basic data structure.
*/
View
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2015 by Contributors
* \file c_api.h
* \brief C API of mxnet
*/
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2015 by Contributors
* \file c_predict_api.h
* \brief C predict API of mxnet, contains a minimum API to run prediction.
* This file is self-contained, and do not dependent on any other files.
View
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2015 by Contributors
* \file engine.h
* \brief Engine that schedules all the operations according to dependency.
*/
View
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2015 by Contributors
* \file executor.h
* \brief Symbolic executor interface of mxnet.
* \author Min Lin, Bing Xu
View
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2015 by Contributors
* \file io.h
* \brief mxnet io data structure and data iterator
*/
View
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2015 by Contributors
* \file kvstore.h
* \brief key-value store interface for mxnet
*/
View
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2015 by Contributors
* \file ndarray.h
* \brief NDArray interface that handles array arithematics.
*/
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2016 by Contributors
* \file op_attr_types.h
* \brief Additional operator attributes
* beside the ones provided by NNVM
View
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2015 by Contributors
* \file operator.h
* \brief Operator interface of mxnet.
* \author Naiyan Wang
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2015 by Contributors
* \file operator_util.h
* \brief Utility functions and registries to help quickly build new operators.
* [Deprecated]
View
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2015 by Contributors
* \file resource.h
* \brief Global resource allocation handling.
*/
View
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2015 by Contributors
* \file storage.h
* \brief Storage manager across multiple devices.
*/
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2014 by Contributors
* \file tensor_blob.h
* \brief TBlob class that holds common representation of
* arbirary dimension tensor, can be used to transformed
@@ -77,4 +77,4 @@ subtype "SymbolOrArrayOfSymbols" => as "AI::MXNet::Symbol|ArrayRef[AI::MXNet::Sy
subtype "NameShapeOrDataDesc" => as "NameShape|AI::MXNet::DataDesc";
subtype "AdvancedSlice" => as "ArrayRef[ArrayRef|PDL|PDL::Matrix|AI::MXNet::NDArray]";
-1;
+1;
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2016 by Contributors
* \file caffe_blob.cc
* \brief Implementations of SetDataGradToBlob given various device/dimension
* \author Haoran Wang
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2016 by Contributors
* \file caffe_blob.h
* \brief conversion between tensor and caffeBlob
* \author Haoran Wang
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2016 by Contributors
* \file caffe_common.h
* \brief Common functions for caffeOp and caffeLoss symbols
* \author Haoran Wang
@@ -18,6 +18,7 @@
*/
/*!
+ * Copyright (c) 2016 by Contributors
* \file caffe_common.h
* \brief Common functions for caffeOp and caffeLoss symbols
* \author Haoran Wang
Oops, something went wrong.

0 comments on commit d2a856a

Please sign in to comment.