git subrepo clone (merge) https://github.com/kubernetes-incubator/metrics-server.git metrics-server

subrepo:
  subdir:   "metrics-server"
  merged:   "92d8412"
upstream:
  origin:   "https://github.com/kubernetes-incubator/metrics-server.git"
  branch:   "master"
  commit:   "92d8412"
git-subrepo:
  version:  "0.4.0"
  origin:   "???"
  commit:   "???"
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/auth/clientauth.go b/metrics-server/vendor/k8s.io/client-go/tools/auth/clientauth.go
new file mode 100644
index 0000000..20339ab
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/auth/clientauth.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package auth defines a file format for holding authentication
+information needed by clients of Kubernetes.  Typically,
+a Kubernetes cluster will put auth info for the admin in a known
+location when it is created, and will (soon) put it in a known
+location within a Container's file tree for Containers that
+need access to the Kubernetes API.
+
+Having a defined format allows:
+  - clients to be implemented in multiple languages
+  - applications which link clients to be portable across
+    clusters with different authentication styles (e.g.
+    some may use SSL Client certs, others may not, etc)
+  - when the format changes, applications only
+    need to update this code.
+
+The file format is JSON, marshalled from a struct authcfg.Info.
+
+Client libraries in other languages should use the same format.
+
+It is not intended to store general preferences, such as default
+namespace, output options, etc.  CLIs (such as kubectl) and UIs should
+develop their own format and may wish to inline the authcfg.Info type.
+
+The authcfg.Info is just a file format.  It is distinct from
+client.Config which holds options for creating a client.Client.
+Helper functions are provided in this package to fill in a
+client.Client from an authcfg.Info.
+
+Example:
+
+    import (
+        restclient "k8s.io/client-go/rest"
+        "k8s.io/client-go/tools/auth"
+    )
+
+    info, err := auth.LoadFromFile(filename)
+    if err != nil {
+      // handle error
+    }
+    clientConfig := restclient.Config{}
+    clientConfig.Host = "example.com:4901"
+    clientConfig, err = info.MergeWithConfig(clientConfig)
+    // clientConfig can now be used to construct a client, e.g. with
+    // kubernetes.NewForConfig(&clientConfig).
+*/
+package auth
+
+// TODO: need a way to rotate Tokens.  Therefore, need a way for client object to be reset when the authcfg is updated.
+import (
+	"encoding/json"
+	"io/ioutil"
+	"os"
+
+	restclient "k8s.io/client-go/rest"
+)
+
+// Info holds Kubernetes API authorization config.  It is intended
+// to be read/written from a file as a JSON object.
+type Info struct {
+	User        string
+	Password    string
+	CAFile      string
+	CertFile    string
+	KeyFile     string
+	BearerToken string
+	Insecure    *bool
+}
+
+// LoadFromFile parses an Info object from a file path.
+// If the file does not exist, then os.IsNotExist(err) == true
+func LoadFromFile(path string) (*Info, error) {
+	var info Info
+	if _, err := os.Stat(path); os.IsNotExist(err) {
+		return nil, err
+	}
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &info)
+	if err != nil {
+		return nil, err
+	}
+	return &info, err
+}
+
+// MergeWithConfig returns a copy of a client.Config with values from the Info.
+// The fields of client.Config with a corresponding field in the Info are set
+// with the value from the Info.
+func (info Info) MergeWithConfig(c restclient.Config) (restclient.Config, error) {
+	var config restclient.Config = c
+	config.Username = info.User
+	config.Password = info.Password
+	config.CAFile = info.CAFile
+	config.CertFile = info.CertFile
+	config.KeyFile = info.KeyFile
+	config.BearerToken = info.BearerToken
+	if info.Insecure != nil {
+		config.Insecure = *info.Insecure
+	}
+	return config, nil
+}
+
+func (info Info) Complete() bool {
+	return len(info.User) > 0 ||
+		len(info.CertFile) > 0 ||
+		len(info.BearerToken) > 0
+}
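
For orientation, a minimal usage sketch (not taken from the vendored tree) of how
the helpers above are consumed; the auth-file path and host are placeholder
assumptions:

    package main

    import (
        "fmt"

        restclient "k8s.io/client-go/rest"
        "k8s.io/client-go/tools/auth"
    )

    func main() {
        // LoadFromFile returns an error satisfying os.IsNotExist when the
        // file is absent, so callers can fall back to other auth sources.
        info, err := auth.LoadFromFile("/var/run/kubernetes/admin.json") // placeholder path
        if err != nil {
            panic(err)
        }
        cfg := restclient.Config{Host: "https://example.com:6443"} // placeholder host
        // MergeWithConfig copies the Info fields onto the rest.Config.
        cfg, err = info.MergeWithConfig(cfg)
        if err != nil {
            panic(err)
        }
        fmt.Println(cfg.Username, info.Complete())
    }
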
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/OWNERS b/metrics-server/vendor/k8s.io/client-go/tools/cache/OWNERS
new file mode 100755
index 0000000..727377f
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/OWNERS
@@ -0,0 +1,50 @@
+approvers:
+- thockin
+- lavalamp
+- smarterclayton
+- wojtek-t
+- deads2k
+- caesarxuchao
+- liggitt
+- ncdc
+reviewers:
+- thockin
+- lavalamp
+- smarterclayton
+- wojtek-t
+- deads2k
+- brendandburns
+- derekwaynecarr
+- caesarxuchao
+- mikedanese
+- liggitt
+- nikhiljindal
+- erictune
+- davidopp
+- pmorie
+- kargakis
+- janetkuo
+- justinsb
+- eparis
+- soltysh
+- jsafrane
+- dims
+- madhusudancs
+- hongchaodeng
+- krousey
+- markturansky
+- fgrzadkowski
+- xiang90
+- mml
+- ingvagabund
+- resouer
+- jessfraz
+- david-mcmahon
+- mfojtik
+- '249043822'
+- lixiaobing10051267
+- ddysher
+- mqliang
+- feihujiang
+- sdminonne
+- ncdc
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/controller.go b/metrics-server/vendor/k8s.io/client-go/tools/cache/controller.go
new file mode 100644
index 0000000..028c75e
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/controller.go
@@ -0,0 +1,394 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"sync"
+	"time"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/clock"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+)
+
+// Config contains all the settings for a Controller.
+type Config struct {
+	// The queue for your objects; either a FIFO or
+	// a DeltaFIFO. Your Process() function should accept
+	// the output of this Queue's Pop() method.
+	Queue
+
+	// Something that can list and watch your objects.
+	ListerWatcher
+
+	// Something that can process your objects.
+	Process ProcessFunc
+
+	// The type of your objects.
+	ObjectType runtime.Object
+
+	// Reprocess everything at least this often.
+	// Note that if it takes longer for you to clear the queue than this
+	// period, you will end up processing items in the order determined
+	// by FIFO.Replace(). Currently, this is random. If this is a
+	// problem, we can change that replacement policy to append new
+	// things to the end of the queue instead of replacing the entire
+	// queue.
+	FullResyncPeriod time.Duration
+
+	// ShouldResync, if specified, is invoked when the controller's reflector determines the next
+	// periodic sync should occur. If this returns true, it means the reflector should proceed with
+	// the resync.
+	ShouldResync ShouldResyncFunc
+
+	// If true, when Process() returns an error, re-enqueue the object.
+	// TODO: add interface to let you inject a delay/backoff or drop
+	//       the object completely if desired. Pass the object in
+	//       question to this interface as a parameter.
+	RetryOnError bool
+}
+
+// ShouldResyncFunc is a type of function that indicates if a reflector should perform a
+// resync or not. It can be used by a shared informer to support multiple event handlers with custom
+// resync periods.
+type ShouldResyncFunc func() bool
+
+// ProcessFunc processes a single object.
+type ProcessFunc func(obj interface{}) error
+
+// Controller is a generic controller framework.
+type controller struct {
+	config         Config
+	reflector      *Reflector
+	reflectorMutex sync.RWMutex
+	clock          clock.Clock
+}
+
+type Controller interface {
+	Run(stopCh <-chan struct{})
+	HasSynced() bool
+	LastSyncResourceVersion() string
+}
+
+// New makes a new Controller from the given Config.
+func New(c *Config) Controller {
+	ctlr := &controller{
+		config: *c,
+		clock:  &clock.RealClock{},
+	}
+	return ctlr
+}
+
+// Run begins processing items, and will continue until a value is sent down stopCh.
+// It's an error to call Run more than once.
+// Run blocks; call via go.
+func (c *controller) Run(stopCh <-chan struct{}) {
+	defer utilruntime.HandleCrash()
+	go func() {
+		<-stopCh
+		c.config.Queue.Close()
+	}()
+	r := NewReflector(
+		c.config.ListerWatcher,
+		c.config.ObjectType,
+		c.config.Queue,
+		c.config.FullResyncPeriod,
+	)
+	r.ShouldResync = c.config.ShouldResync
+	r.clock = c.clock
+
+	c.reflectorMutex.Lock()
+	c.reflector = r
+	c.reflectorMutex.Unlock()
+
+	var wg wait.Group
+	defer wg.Wait()
+
+	wg.StartWithChannel(stopCh, r.Run)
+
+	wait.Until(c.processLoop, time.Second, stopCh)
+}
+
+// HasSynced returns true once this controller has completed an initial resource listing.
+func (c *controller) HasSynced() bool {
+	return c.config.Queue.HasSynced()
+}
+
+func (c *controller) LastSyncResourceVersion() string {
+	if c.reflector == nil {
+		return ""
+	}
+	return c.reflector.LastSyncResourceVersion()
+}
+
+// processLoop drains the work queue.
+// TODO: Consider doing the processing in parallel. This will require a little thought
+// to make sure that we don't end up processing the same object multiple times
+// concurrently.
+//
+// TODO: Plumb through the stopCh here (and down to the queue) so that this can
+// actually exit when the controller is stopped. Or just give up on this stuff
+// ever being stoppable. Converting this whole package to use Context would
+// also be helpful.
+func (c *controller) processLoop() {
+	for {
+		obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process))
+		if err != nil {
+			if err == FIFOClosedError {
+				return
+			}
+			if c.config.RetryOnError {
+				// This is the safe way to re-enqueue.
+				c.config.Queue.AddIfNotPresent(obj)
+			}
+		}
+	}
+}
+
+// ResourceEventHandler can handle notifications for events that happen to a
+// resource. The events are informational only, so you can't return an
+// error.
+//  * OnAdd is called when an object is added.
+//  * OnUpdate is called when an object is modified. Note that oldObj is the
+//      last known state of the object-- it is possible that several changes
+//      were combined together, so you can't use this to see every single
+//      change. OnUpdate is also called when a re-list happens, and it will
+//      get called even if nothing changed. This is useful for periodically
+//      evaluating or syncing something.
+//  * OnDelete will get the final state of the item if it is known, otherwise
+//      it will get an object of type DeletedFinalStateUnknown. This can
+//      happen if the watch is closed and misses the delete event and we don't
+//      notice the deletion until the subsequent re-list.
+type ResourceEventHandler interface {
+	OnAdd(obj interface{})
+	OnUpdate(oldObj, newObj interface{})
+	OnDelete(obj interface{})
+}
+
+// ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or
+// as few of the notification functions as you want while still implementing
+// ResourceEventHandler.
+type ResourceEventHandlerFuncs struct {
+	AddFunc    func(obj interface{})
+	UpdateFunc func(oldObj, newObj interface{})
+	DeleteFunc func(obj interface{})
+}
+
+// OnAdd calls AddFunc if it's not nil.
+func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) {
+	if r.AddFunc != nil {
+		r.AddFunc(obj)
+	}
+}
+
+// OnUpdate calls UpdateFunc if it's not nil.
+func (r ResourceEventHandlerFuncs) OnUpdate(oldObj, newObj interface{}) {
+	if r.UpdateFunc != nil {
+		r.UpdateFunc(oldObj, newObj)
+	}
+}
+
+// OnDelete calls DeleteFunc if it's not nil.
+func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
+	if r.DeleteFunc != nil {
+		r.DeleteFunc(obj)
+	}
+}
+
+// FilteringResourceEventHandler applies the provided filter to all events coming
+// in, ensuring the appropriate nested handler method is invoked. An object
+// that starts passing the filter after an update is considered an add, and an
+// object that stops passing the filter after an update is considered a delete.
+type FilteringResourceEventHandler struct {
+	FilterFunc func(obj interface{}) bool
+	Handler    ResourceEventHandler
+}
+
+// OnAdd calls the nested handler only if the filter succeeds
+func (r FilteringResourceEventHandler) OnAdd(obj interface{}) {
+	if !r.FilterFunc(obj) {
+		return
+	}
+	r.Handler.OnAdd(obj)
+}
+
+// OnUpdate ensures the proper handler is called depending on whether the filter matches
+func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) {
+	newer := r.FilterFunc(newObj)
+	older := r.FilterFunc(oldObj)
+	switch {
+	case newer && older:
+		r.Handler.OnUpdate(oldObj, newObj)
+	case newer && !older:
+		r.Handler.OnAdd(newObj)
+	case !newer && older:
+		r.Handler.OnDelete(oldObj)
+	default:
+		// do nothing
+	}
+}
+
+// OnDelete calls the nested handler only if the filter succeeds
+func (r FilteringResourceEventHandler) OnDelete(obj interface{}) {
+	if !r.FilterFunc(obj) {
+		return
+	}
+	r.Handler.OnDelete(obj)
+}
+
+// DeletionHandlingMetaNamespaceKeyFunc checks for
+// DeletedFinalStateUnknown objects before calling
+// MetaNamespaceKeyFunc.
+func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
+	if d, ok := obj.(DeletedFinalStateUnknown); ok {
+		return d.Key, nil
+	}
+	return MetaNamespaceKeyFunc(obj)
+}
+
+// NewInformer returns a Store and a controller for populating the store
+// while also providing event notifications. You should only use the returned
+// Store for Get/List operations; Add/Modify/Delete will cause the event
+// notifications to be faulty.
+//
+// Parameters:
+//  * lw is list and watch functions for the source of the resource you want to
+//    be informed of.
+//  * objType is an object of the type that you expect to receive.
+//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+//    calls, even if nothing changed). Otherwise, re-list will be delayed as
+//    long as possible (until the upstream source closes the watch or times out,
+//    or you stop the controller).
+//  * h is the object you want notifications sent to.
+//
+func NewInformer(
+	lw ListerWatcher,
+	objType runtime.Object,
+	resyncPeriod time.Duration,
+	h ResourceEventHandler,
+) (Store, Controller) {
+	// This will hold the client state, as we know it.
+	clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
+
+	// This will hold incoming changes. Note how we pass clientState in as a
+	// KeyLister, that way resync operations will result in the correct set
+	// of update/delete deltas.
+	fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState)
+
+	cfg := &Config{
+		Queue:            fifo,
+		ListerWatcher:    lw,
+		ObjectType:       objType,
+		FullResyncPeriod: resyncPeriod,
+		RetryOnError:     false,
+
+		Process: func(obj interface{}) error {
+			// from oldest to newest
+			for _, d := range obj.(Deltas) {
+				switch d.Type {
+				case Sync, Added, Updated:
+					if old, exists, err := clientState.Get(d.Object); err == nil && exists {
+						if err := clientState.Update(d.Object); err != nil {
+							return err
+						}
+						h.OnUpdate(old, d.Object)
+					} else {
+						if err := clientState.Add(d.Object); err != nil {
+							return err
+						}
+						h.OnAdd(d.Object)
+					}
+				case Deleted:
+					if err := clientState.Delete(d.Object); err != nil {
+						return err
+					}
+					h.OnDelete(d.Object)
+				}
+			}
+			return nil
+		},
+	}
+	return clientState, New(cfg)
+}
+
+// NewIndexerInformer returns an Indexer and a controller for populating the index
+// while also providing event notifications. You should only use the returned
+// Indexer for Get/List operations; Add/Modify/Delete will cause the event
+// notifications to be faulty.
+//
+// Parameters:
+//  * lw is list and watch functions for the source of the resource you want to
+//    be informed of.
+//  * objType is an object of the type that you expect to receive.
+//  * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+//    calls, even if nothing changed). Otherwise, re-list will be delayed as
+//    long as possible (until the upstream source closes the watch or times out,
+//    or you stop the controller).
+//  * h is the object you want notifications sent to.
+//  * indexers is the indexer for the received object type.
+//
+func NewIndexerInformer(
+	lw ListerWatcher,
+	objType runtime.Object,
+	resyncPeriod time.Duration,
+	h ResourceEventHandler,
+	indexers Indexers,
+) (Indexer, Controller) {
+	// This will hold the client state, as we know it.
+	clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
+
+	// This will hold incoming changes. Note how we pass clientState in as a
+	// KeyLister, that way resync operations will result in the correct set
+	// of update/delete deltas.
+	fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState)
+
+	cfg := &Config{
+		Queue:            fifo,
+		ListerWatcher:    lw,
+		ObjectType:       objType,
+		FullResyncPeriod: resyncPeriod,
+		RetryOnError:     false,
+
+		Process: func(obj interface{}) error {
+			// from oldest to newest
+			for _, d := range obj.(Deltas) {
+				switch d.Type {
+				case Sync, Added, Updated:
+					if old, exists, err := clientState.Get(d.Object); err == nil && exists {
+						if err := clientState.Update(d.Object); err != nil {
+							return err
+						}
+						h.OnUpdate(old, d.Object)
+					} else {
+						if err := clientState.Add(d.Object); err != nil {
+							return err
+						}
+						h.OnAdd(d.Object)
+					}
+				case Deleted:
+					if err := clientState.Delete(d.Object); err != nil {
+						return err
+					}
+					h.OnDelete(d.Object)
+				}
+			}
+			return nil
+		},
+	}
+	return clientState, New(cfg)
+}
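
A sketch of wiring NewInformer to a live cluster, assuming a standard clientset
and a placeholder kubeconfig path; NewListWatchFromClient comes from
listwatch.go in this same package (not shown in this hunk):

    package main

    import (
        "fmt"
        "time"

        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/fields"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/cache"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // placeholder
        if err != nil {
            panic(err)
        }
        clientset := kubernetes.NewForConfigOrDie(config)

        // List/watch pods in the default namespace.
        lw := cache.NewListWatchFromClient(
            clientset.CoreV1().RESTClient(), "pods", "default", fields.Everything())

        store, controller := cache.NewInformer(lw, &v1.Pod{}, 30*time.Second,
            cache.ResourceEventHandlerFuncs{
                AddFunc:    func(obj interface{}) { fmt.Println("add:", obj.(*v1.Pod).Name) },
                DeleteFunc: func(obj interface{}) { fmt.Println("del:", obj) },
            })

        stop := make(chan struct{})
        defer close(stop)
        go controller.Run(stop)

        // Use the returned Store for reads only; writing to it would
        // desynchronize the event notifications, per the comment above.
        for !controller.HasSynced() {
            time.Sleep(100 * time.Millisecond)
        }
        fmt.Println("pods in cache:", len(store.List()))
    }
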
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/metrics-server/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
new file mode 100644
index 0000000..45c3b50
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
@@ -0,0 +1,659 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+
+	"github.com/golang/glog"
+)
+
+// NewDeltaFIFO returns a Store which can be used to process changes to items.
+//
+// keyFunc is used to figure out what key an object should have. (It's
+// exposed in the returned DeltaFIFO's KeyOf() method, with bonus features.)
+//
+// 'keyLister' is expected to return a list of keys that the consumer of
+// this queue "knows about". It is used to decide which items are missing
+// when Replace() is called; 'Deleted' deltas are produced for these items.
+// It may be nil if you don't need to detect all deletions.
+// TODO: consider merging keyLister with this object, tracking a list of
+//       "known" keys when Pop() is called. Have to think about how that
+//       affects error retrying.
+// NOTE: It is possible to misuse this and cause a race when using an
+//       external known object source.
+//       Whether there is a potential race depends on how the consumer
+//       modifies knownObjects. In Pop(), process function is called under
+//       lock, so it is safe to update data structures in it that need to be
+//       in sync with the queue (e.g. knownObjects).
+//
+//       Example:
+//       In case of sharedIndexInformer being a consumer
+//       (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/
+//       src/k8s.io/client-go/tools/cache/shared_informer.go#L192),
+//       there is no race as knownObjects (s.indexer) is modified safely
+//       under DeltaFIFO's lock. The only exceptions are GetStore() and
+//       GetIndexer() methods, which expose ways to modify the underlying
+//       storage. Currently these two methods are used for creating Lister
+//       and internal tests.
+//
+// Also see the comment on DeltaFIFO.
+func NewDeltaFIFO(keyFunc KeyFunc, knownObjects KeyListerGetter) *DeltaFIFO {
+	f := &DeltaFIFO{
+		items:        map[string]Deltas{},
+		queue:        []string{},
+		keyFunc:      keyFunc,
+		knownObjects: knownObjects,
+	}
+	f.cond.L = &f.lock
+	return f
+}
+
+// DeltaFIFO is like FIFO, but allows you to process deletes.
+//
+// DeltaFIFO is a producer-consumer queue, where a Reflector is
+// intended to be the producer, and the consumer is whatever calls
+// the Pop() method.
+//
+// DeltaFIFO solves this use case:
+//  * You want to process every object change (delta) at most once.
+//  * When you process an object, you want to see everything
+//    that's happened to it since you last processed it.
+//  * You want to process the deletion of objects.
+//  * You might want to periodically reprocess objects.
+//
+// DeltaFIFO's Pop(), Get(), and GetByKey() methods return
+// interface{} to satisfy the Store/Queue interfaces, but it
+// will always return an object of type Deltas.
+//
+// A note on threading: If you call Pop() in parallel from multiple
+// threads, you could end up with multiple threads processing slightly
+// different versions of the same object.
+//
+// A note on the KeyLister used by the DeltaFIFO: Its main purpose is
+// to list keys that are "known", for the purpose of figuring out which
+// items have been deleted when Replace() or Delete() are called. The deleted
+// object will be included in the DeletedFinalStateUnknown markers. These objects
+// could be stale.
+type DeltaFIFO struct {
+	// lock/cond protects access to 'items' and 'queue'.
+	lock sync.RWMutex
+	cond sync.Cond
+
+	// We depend on the property that items in the set are in
+	// the queue and vice versa, and that all Deltas in this
+	// map have at least one Delta.
+	items map[string]Deltas
+	queue []string
+
+	// populated is true if the first batch of items inserted by Replace() has been populated
+	// or Delete/Add/Update was called first.
+	populated bool
+	// initialPopulationCount is the number of items inserted by the first call of Replace()
+	initialPopulationCount int
+
+	// keyFunc is used to make the key used for queued item
+	// insertion and retrieval, and should be deterministic.
+	keyFunc KeyFunc
+
+	// knownObjects lists keys that are "known", for the
+	// purpose of figuring out which items have been deleted
+	// when Replace() or Delete() is called.
+	knownObjects KeyListerGetter
+
+	// Indication the queue is closed.
+	// Used to indicate a queue is closed so a control loop can exit when a queue is empty.
+	// Currently, not used to gate any of the CRUD operations.
+	closed     bool
+	closedLock sync.Mutex
+}
+
+var (
+	_ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue
+)
+
+var (
+	// ErrZeroLengthDeltasObject is returned in a KeyError if a Deltas
+	// object with zero length is encountered (should be impossible,
+	// but included for completeness).
+	ErrZeroLengthDeltasObject = errors.New("0 length Deltas object; can't get key")
+)
+
+// Close the queue.
+func (f *DeltaFIFO) Close() {
+	f.closedLock.Lock()
+	defer f.closedLock.Unlock()
+	f.closed = true
+	f.cond.Broadcast()
+}
+
+// KeyOf exposes f's keyFunc, but also detects the key of a Deltas object or
+// DeletedFinalStateUnknown objects.
+func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) {
+	if d, ok := obj.(Deltas); ok {
+		if len(d) == 0 {
+			return "", KeyError{obj, ErrZeroLengthDeltasObject}
+		}
+		obj = d.Newest().Object
+	}
+	if d, ok := obj.(DeletedFinalStateUnknown); ok {
+		return d.Key, nil
+	}
+	return f.keyFunc(obj)
+}
+
+// HasSynced returns true if Add/Update/Delete/AddIfNotPresent was called first,
+// or if the first batch of items inserted by Replace() has been popped.
+func (f *DeltaFIFO) HasSynced() bool {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	return f.populated && f.initialPopulationCount == 0
+}
+
+// Add inserts an item, and puts it in the queue. The item is only enqueued
+// if it doesn't already exist in the set.
+func (f *DeltaFIFO) Add(obj interface{}) error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.populated = true
+	return f.queueActionLocked(Added, obj)
+}
+
+// Update is just like Add, but makes an Updated Delta.
+func (f *DeltaFIFO) Update(obj interface{}) error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.populated = true
+	return f.queueActionLocked(Updated, obj)
+}
+
+// Delete is just like Add, but makes a Deleted Delta. If the item does not
+// already exist, it will be ignored. (It may have already been deleted by a
+// Replace (re-list), for example.)
+func (f *DeltaFIFO) Delete(obj interface{}) error {
+	id, err := f.KeyOf(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.populated = true
+	if f.knownObjects == nil {
+		if _, exists := f.items[id]; !exists {
+			// Presumably, this was deleted when a relist happened.
+			// Don't provide a second report of the same deletion.
+			return nil
+		}
+	} else {
+		// We only want to skip the "deletion" action if the object doesn't
+		// exist in knownObjects and it doesn't have corresponding item in items.
+		// Note that even if there is a "deletion" action in items, we can ignore it,
+		// because it will be deduped automatically in "queueActionLocked"
+		_, exists, err := f.knownObjects.GetByKey(id)
+		_, itemsExist := f.items[id]
+		if err == nil && !exists && !itemsExist {
+			// Presumably, this was deleted when a relist happened.
+			// Don't provide a second report of the same deletion.
+			return nil
+		}
+	}
+
+	return f.queueActionLocked(Deleted, obj)
+}
+
+// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already
+// present in the set, it is neither enqueued nor added to the set.
+//
+// This is useful in a single producer/consumer scenario so that the consumer can
+// safely retry items without contending with the producer and potentially enqueueing
+// stale items.
+//
+// Important: obj must be a Deltas (the output of the Pop() function). Yes, this is
+// different from the Add/Update/Delete functions.
+func (f *DeltaFIFO) AddIfNotPresent(obj interface{}) error {
+	deltas, ok := obj.(Deltas)
+	if !ok {
+		return fmt.Errorf("object must be of type deltas, but got: %#v", obj)
+	}
+	id, err := f.KeyOf(deltas.Newest().Object)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.addIfNotPresent(id, deltas)
+	return nil
+}
+
+// addIfNotPresent inserts deltas under id if it does not exist, and assumes the caller
+// already holds the fifo lock.
+func (f *DeltaFIFO) addIfNotPresent(id string, deltas Deltas) {
+	f.populated = true
+	if _, exists := f.items[id]; exists {
+		return
+	}
+
+	f.queue = append(f.queue, id)
+	f.items[id] = deltas
+	f.cond.Broadcast()
+}
+
+// re-listing and watching can deliver the same update multiple times in any
+// order. This will combine the most recent two deltas if they are the same.
+func dedupDeltas(deltas Deltas) Deltas {
+	n := len(deltas)
+	if n < 2 {
+		return deltas
+	}
+	a := &deltas[n-1]
+	b := &deltas[n-2]
+	if out := isDup(a, b); out != nil {
+		d := append(Deltas{}, deltas[:n-2]...)
+		return append(d, *out)
+	}
+	return deltas
+}
+
+// If a & b represent the same event, returns the delta that ought to be kept.
+// Otherwise, returns nil.
+// TODO: is there anything other than deletions that need deduping?
+func isDup(a, b *Delta) *Delta {
+	if out := isDeletionDup(a, b); out != nil {
+		return out
+	}
+	// TODO: Detect other duplicate situations? Are there any?
+	return nil
+}
+
+// keep the one with the most information if both are deletions.
+func isDeletionDup(a, b *Delta) *Delta {
+	if b.Type != Deleted || a.Type != Deleted {
+		return nil
+	}
+	// Do more sophisticated checks, or is this sufficient?
+	if _, ok := b.Object.(DeletedFinalStateUnknown); ok {
+		return a
+	}
+	return b
+}
+
+// willObjectBeDeletedLocked returns true only if the last delta for the
+// given object is Delete. Caller must lock first.
+func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool {
+	deltas := f.items[id]
+	return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted
+}
+
+// queueActionLocked appends to the delta list for the object.
+// Caller must lock first.
+func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error {
+	id, err := f.KeyOf(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+
+	// If object is supposed to be deleted (last event is Deleted),
+	// then we should ignore Sync events, because it would result in
+	// recreation of this object.
+	if actionType == Sync && f.willObjectBeDeletedLocked(id) {
+		return nil
+	}
+
+	newDeltas := append(f.items[id], Delta{actionType, obj})
+	newDeltas = dedupDeltas(newDeltas)
+
+	_, exists := f.items[id]
+	if len(newDeltas) > 0 {
+		if !exists {
+			f.queue = append(f.queue, id)
+		}
+		f.items[id] = newDeltas
+		f.cond.Broadcast()
+	} else if exists {
+		// We need to remove this from our map (extra items
+		// in the queue are ignored if they are not in the
+		// map).
+		delete(f.items, id)
+	}
+	return nil
+}
+
+// List returns a list of all the items; it returns the object
+// from the most recent Delta.
+// You should treat the items returned inside the deltas as immutable.
+func (f *DeltaFIFO) List() []interface{} {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	return f.listLocked()
+}
+
+func (f *DeltaFIFO) listLocked() []interface{} {
+	list := make([]interface{}, 0, len(f.items))
+	for _, item := range f.items {
+		// Copy item's slice so operations on this slice
+		// won't interfere with the object we return.
+		item = copyDeltas(item)
+		list = append(list, item.Newest().Object)
+	}
+	return list
+}
+
+// ListKeys returns a list of all the keys of the objects currently
+// in the FIFO.
+func (f *DeltaFIFO) ListKeys() []string {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	list := make([]string, 0, len(f.items))
+	for key := range f.items {
+		list = append(list, key)
+	}
+	return list
+}
+
+// Get returns the complete list of deltas for the requested item,
+// or sets exists=false.
+// You should treat the items returned inside the deltas as immutable.
+func (f *DeltaFIFO) Get(obj interface{}) (item interface{}, exists bool, err error) {
+	key, err := f.KeyOf(obj)
+	if err != nil {
+		return nil, false, KeyError{obj, err}
+	}
+	return f.GetByKey(key)
+}
+
+// GetByKey returns the complete list of deltas for the requested item,
+// setting exists=false if that list is empty.
+// You should treat the items returned inside the deltas as immutable.
+func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	d, exists := f.items[key]
+	if exists {
+		// Copy item's slice so operations on this slice
+		// won't interfere with the object we return.
+		d = copyDeltas(d)
+	}
+	return d, exists, nil
+}
+
+// IsClosed checks if the queue is closed.
+func (f *DeltaFIFO) IsClosed() bool {
+	f.closedLock.Lock()
+	defer f.closedLock.Unlock()
+	return f.closed
+}
+
+// Pop blocks until an item is added to the queue, and then returns it.  If
+// multiple items are ready, they are returned in the order in which they were
+// added/updated. The item is removed from the queue (and the store) before it
+// is returned, so if you don't successfully process it, you need to add it back
+// with AddIfNotPresent().
+// process function is called under lock, so it is safe to update data structures
+// in it that need to be in sync with the queue (e.g. knownKeys). The PopProcessFunc
+// may return an instance of ErrRequeue with a nested error to indicate the current
+// item should be requeued (equivalent to calling AddIfNotPresent under the lock).
+//
+// Pop returns a 'Deltas', which has a complete list of all the things
+// that happened to the object (deltas) while it was sitting in the queue.
+func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	for {
+		for len(f.queue) == 0 {
+			// When the queue is empty, invocation of Pop() is blocked until a new item is enqueued.
+			// When Close() is called, f.closed is set and the condition is broadcast,
+			// which causes this loop to continue and return from Pop().
+			if f.IsClosed() {
+				return nil, FIFOClosedError
+			}
+
+			f.cond.Wait()
+		}
+		id := f.queue[0]
+		f.queue = f.queue[1:]
+		item, ok := f.items[id]
+		if f.initialPopulationCount > 0 {
+			f.initialPopulationCount--
+		}
+		if !ok {
+			// Item may have been deleted subsequently.
+			continue
+		}
+		delete(f.items, id)
+		err := process(item)
+		if e, ok := err.(ErrRequeue); ok {
+			f.addIfNotPresent(id, item)
+			err = e.Err
+		}
+		// Don't need to copyDeltas here, because we're transferring
+		// ownership to the caller.
+		return item, err
+	}
+}
+
+// Replace will delete the contents of 'f', using instead the given map.
+// 'f' takes ownership of the map, you should not reference the map again
+// after calling this function. f's queue is reset, too; upon return, it
+// will contain the items in the map, in no particular order.
+func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	keys := make(sets.String, len(list))
+
+	for _, item := range list {
+		key, err := f.KeyOf(item)
+		if err != nil {
+			return KeyError{item, err}
+		}
+		keys.Insert(key)
+		if err := f.queueActionLocked(Sync, item); err != nil {
+			return fmt.Errorf("couldn't enqueue object: %v", err)
+		}
+	}
+
+	if f.knownObjects == nil {
+		// Do deletion detection against our own list.
+		for k, oldItem := range f.items {
+			if keys.Has(k) {
+				continue
+			}
+			var deletedObj interface{}
+			if n := oldItem.Newest(); n != nil {
+				deletedObj = n.Object
+			}
+			if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
+				return err
+			}
+		}
+
+		if !f.populated {
+			f.populated = true
+			f.initialPopulationCount = len(list)
+		}
+
+		return nil
+	}
+
+	// Detect deletions not already in the queue.
+	knownKeys := f.knownObjects.ListKeys()
+	queuedDeletions := 0
+	for _, k := range knownKeys {
+		if keys.Has(k) {
+			continue
+		}
+
+		deletedObj, exists, err := f.knownObjects.GetByKey(k)
+		if err != nil {
+			deletedObj = nil
+			glog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
+		} else if !exists {
+			deletedObj = nil
+			glog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
+		}
+		queuedDeletions++
+		if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
+			return err
+		}
+	}
+
+	if !f.populated {
+		f.populated = true
+		f.initialPopulationCount = len(list) + queuedDeletions
+	}
+
+	return nil
+}
+
+// Resync will send a sync event for each item
+func (f *DeltaFIFO) Resync() error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if f.knownObjects == nil {
+		return nil
+	}
+
+	keys := f.knownObjects.ListKeys()
+	for _, k := range keys {
+		if err := f.syncKeyLocked(k); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (f *DeltaFIFO) syncKey(key string) error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	return f.syncKeyLocked(key)
+}
+
+func (f *DeltaFIFO) syncKeyLocked(key string) error {
+	obj, exists, err := f.knownObjects.GetByKey(key)
+	if err != nil {
+		glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key)
+		return nil
+	} else if !exists {
+		glog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key)
+		return nil
+	}
+
+	// If we are doing Resync() and there is already an event queued for that object,
+	// we ignore the Resync for it. This is to avoid the race, in which the resync
+// comes with the previous value of the object (since queueing an event for the object
+// doesn't trigger changing the underlying store <knownObjects>).
+	id, err := f.KeyOf(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	if len(f.items[id]) > 0 {
+		return nil
+	}
+
+	if err := f.queueActionLocked(Sync, obj); err != nil {
+		return fmt.Errorf("couldn't queue object: %v", err)
+	}
+	return nil
+}
+
+// A KeyListerGetter is anything that knows how to list its keys and look up by key.
+type KeyListerGetter interface {
+	KeyLister
+	KeyGetter
+}
+
+// A KeyLister is anything that knows how to list its keys.
+type KeyLister interface {
+	ListKeys() []string
+}
+
+// A KeyGetter is anything that knows how to get the value stored under a given key.
+type KeyGetter interface {
+	GetByKey(key string) (interface{}, bool, error)
+}
+
+// DeltaType is the type of a change (addition, deletion, etc)
+type DeltaType string
+
+const (
+	Added   DeltaType = "Added"
+	Updated DeltaType = "Updated"
+	Deleted DeltaType = "Deleted"
+	// The other types are obvious. You'll get Sync deltas when:
+	//  * A watch expires/errors out and a new list/watch cycle is started.
+	//  * You've turned on periodic syncs.
+	// (Anything that triggers DeltaFIFO's Replace() method.)
+	Sync DeltaType = "Sync"
+)
+
+// Delta is the type stored by a DeltaFIFO. It tells you what change
+// happened, and the object's state after* that change.
+//
+// [*] Unless the change is a deletion, and then you'll get the final
+//     state of the object before it was deleted.
+type Delta struct {
+	Type   DeltaType
+	Object interface{}
+}
+
+// Deltas is a list of one or more 'Delta's to an individual object.
+// The oldest delta is at index 0, the newest delta is the last one.
+type Deltas []Delta
+
+// Oldest is a convenience function that returns the oldest delta, or
+// nil if there are no deltas.
+func (d Deltas) Oldest() *Delta {
+	if len(d) > 0 {
+		return &d[0]
+	}
+	return nil
+}
+
+// Newest is a convenience function that returns the newest delta, or
+// nil if there are no deltas.
+func (d Deltas) Newest() *Delta {
+	if n := len(d); n > 0 {
+		return &d[n-1]
+	}
+	return nil
+}
+
+// copyDeltas returns a shallow copy of d; that is, it copies the slice but not
+// the objects in the slice. This allows Get/List to return an object that we
+// know won't be clobbered by subsequent modifications.
+func copyDeltas(d Deltas) Deltas {
+	d2 := make(Deltas, len(d))
+	copy(d2, d)
+	return d2
+}
+
+// DeletedFinalStateUnknown is placed into a DeltaFIFO in the case where
+// an object was deleted but the watch deletion event was missed. In this
+// case we don't know the final "resting" state of the object, so there's
+// a chance the included `Obj` is stale.
+type DeletedFinalStateUnknown struct {
+	Key string
+	Obj interface{}
+}
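
A self-contained sketch of the producer/consumer contract described above; the
Pod is just a convenient keyed object for MetaNamespaceKeyFunc:

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/tools/cache"
    )

    func main() {
        // nil knownObjects: deletion detection in Replace() falls back to
        // the FIFO's own contents.
        fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil)

        pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}}
        fifo.Add(pod)
        fifo.Update(pod)
        fifo.Delete(pod)

        // Pop hands the consumer the accumulated Deltas for one key,
        // oldest first, while holding the FIFO lock.
        fifo.Pop(func(obj interface{}) error {
            for _, d := range obj.(cache.Deltas) {
                fmt.Println(d.Type) // Added, Updated, Deleted
            }
            return nil
        })
    }
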
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/doc.go b/metrics-server/vendor/k8s.io/client-go/tools/cache/doc.go
new file mode 100644
index 0000000..56b61d3
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package cache is a client-side caching mechanism. It is useful for
+// reducing the number of server calls you'd otherwise need to make.
+// Reflector watches a server and updates a Store. Two stores are provided;
+// one that simply caches objects (for example, to allow a scheduler to
+// list currently available nodes), and one that additionally acts as
+// a FIFO queue (for example, to allow a scheduler to process incoming
+// pods).
+package cache // import "k8s.io/client-go/tools/cache"
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/expiration_cache.go b/metrics-server/vendor/k8s.io/client-go/tools/cache/expiration_cache.go
new file mode 100644
index 0000000..fa88fc4
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/expiration_cache.go
@@ -0,0 +1,208 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"sync"
+	"time"
+
+	"github.com/golang/glog"
+	"k8s.io/apimachinery/pkg/util/clock"
+)
+
+// ExpirationCache implements the store interface
+//	1. All entries are automatically time stamped on insert
+//		a. The key is computed based off the original item/keyFunc
+//		b. The value inserted under that key is the timestamped item
+//	2. Expiration happens lazily on read based on the expiration policy
+//		a. No item can be inserted into the store while we're expiring
+//		   *any* item in the cache.
+//	3. Time-stamps are stripped off unexpired entries before return
+// Note that the ExpirationCache is inherently slower than a normal
+// threadSafeStore because it takes a write lock every time it checks if
+// an item has expired.
+type ExpirationCache struct {
+	cacheStorage     ThreadSafeStore
+	keyFunc          KeyFunc
+	clock            clock.Clock
+	expirationPolicy ExpirationPolicy
+	// expirationLock is a write lock used to guarantee that we don't clobber
+	// newly inserted objects because of a stale expiration timestamp comparison
+	expirationLock sync.Mutex
+}
+
+// ExpirationPolicy dictates when an object expires. Currently only abstracted out
+// so unittests don't rely on the system clock.
+type ExpirationPolicy interface {
+	IsExpired(obj *timestampedEntry) bool
+}
+
+// TTLPolicy implements a ttl based ExpirationPolicy.
+type TTLPolicy struct {
+	//	 >0: Expire entries with an age > ttl
+	//	<=0: Don't expire any entry
+	Ttl time.Duration
+
+	// Clock used to calculate ttl expiration
+	Clock clock.Clock
+}
+
+// IsExpired returns true if the given object is older than the ttl, or if it
+// can't determine the object's age.
+func (p *TTLPolicy) IsExpired(obj *timestampedEntry) bool {
+	return p.Ttl > 0 && p.Clock.Since(obj.timestamp) > p.Ttl
+}
+
+// timestampedEntry is the only type allowed in an ExpirationCache.
+type timestampedEntry struct {
+	obj       interface{}
+	timestamp time.Time
+}
+
+// getTimestampedEntry returns the timestampedEntry stored under the given key.
+func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) {
+	item, _ := c.cacheStorage.Get(key)
+	if tsEntry, ok := item.(*timestampedEntry); ok {
+		return tsEntry, true
+	}
+	return nil, false
+}
+
+// getOrExpire retrieves the object from the timestampedEntry if and only if it hasn't
+// already expired. It holds a write lock across deletion.
+func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
+	// Prevent all inserts from the time we deem an item as "expired" to when we
+	// delete it, so an un-expired item doesn't sneak in under the same key, just
+	// before the Delete.
+	c.expirationLock.Lock()
+	defer c.expirationLock.Unlock()
+	timestampedItem, exists := c.getTimestampedEntry(key)
+	if !exists {
+		return nil, false
+	}
+	if c.expirationPolicy.IsExpired(timestampedItem) {
+		glog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj)
+		c.cacheStorage.Delete(key)
+		return nil, false
+	}
+	return timestampedItem.obj, true
+}
+
+// GetByKey returns the item stored under the key, or sets exists=false.
+func (c *ExpirationCache) GetByKey(key string) (interface{}, bool, error) {
+	obj, exists := c.getOrExpire(key)
+	return obj, exists, nil
+}
+
+// Get returns unexpired items. It purges the cache of expired items in the
+// process.
+func (c *ExpirationCache) Get(obj interface{}) (interface{}, bool, error) {
+	key, err := c.keyFunc(obj)
+	if err != nil {
+		return nil, false, KeyError{obj, err}
+	}
+	obj, exists := c.getOrExpire(key)
+	return obj, exists, nil
+}
+
+// List retrieves a list of unexpired items. It purges the cache of expired
+// items in the process.
+func (c *ExpirationCache) List() []interface{} {
+	items := c.cacheStorage.List()
+
+	list := make([]interface{}, 0, len(items))
+	for _, item := range items {
+		obj := item.(*timestampedEntry).obj
+		if key, err := c.keyFunc(obj); err != nil {
+			list = append(list, obj)
+		} else if obj, exists := c.getOrExpire(key); exists {
+			list = append(list, obj)
+		}
+	}
+	return list
+}
+
+// ListKeys returns a list of all keys in the expiration cache.
+func (c *ExpirationCache) ListKeys() []string {
+	return c.cacheStorage.ListKeys()
+}
+
+// Add timestamps an item and inserts it into the cache, overwriting entries
+// that might exist under the same key.
+func (c *ExpirationCache) Add(obj interface{}) error {
+	c.expirationLock.Lock()
+	defer c.expirationLock.Unlock()
+
+	key, err := c.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	c.cacheStorage.Add(key, &timestampedEntry{obj, c.clock.Now()})
+	return nil
+}
+
+// Update has not been implemented yet for lack of a use case, so this method
+// simply calls `Add`. This effectively refreshes the timestamp.
+func (c *ExpirationCache) Update(obj interface{}) error {
+	return c.Add(obj)
+}
+
+// Delete removes an item from the cache.
+func (c *ExpirationCache) Delete(obj interface{}) error {
+	c.expirationLock.Lock()
+	defer c.expirationLock.Unlock()
+	key, err := c.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	c.cacheStorage.Delete(key)
+	return nil
+}
+
+// Replace will convert all items in the given list to timestampedEntries
+// before attempting the replace operation. The replace operation will
+// delete the contents of the ExpirationCache `c`.
+func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error {
+	c.expirationLock.Lock()
+	defer c.expirationLock.Unlock()
+	items := map[string]interface{}{}
+	ts := c.clock.Now()
+	for _, item := range list {
+		key, err := c.keyFunc(item)
+		if err != nil {
+			return KeyError{item, err}
+		}
+		items[key] = &timestampedEntry{item, ts}
+	}
+	c.cacheStorage.Replace(items, resourceVersion)
+	return nil
+}
+
+// Resync will touch all objects to put them into the processing queue
+func (c *ExpirationCache) Resync() error {
+	return c.cacheStorage.Resync()
+}
+
+// NewTTLStore creates and returns an ExpirationCache with a TTLPolicy
+func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store {
+	return &ExpirationCache{
+		cacheStorage:     NewThreadSafeStore(Indexers{}, Indices{}),
+		keyFunc:          keyFunc,
+		clock:            clock.RealClock{},
+		expirationPolicy: &TTLPolicy{ttl, clock.RealClock{}},
+	}
+}
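
A small sketch of the lazy-expiration behavior using NewTTLStore; the entry
type and one-second TTL are illustrative assumptions:

    package main

    import (
        "fmt"
        "time"

        "k8s.io/client-go/tools/cache"
    )

    type entry struct{ key, value string }

    func main() {
        keyFunc := func(obj interface{}) (string, error) {
            return obj.(*entry).key, nil
        }
        // Entries older than one second expire lazily on the next read.
        store := cache.NewTTLStore(keyFunc, time.Second)

        store.Add(&entry{"a", "1"})
        if obj, exists, _ := store.GetByKey("a"); exists {
            fmt.Println("fresh:", obj.(*entry).value)
        }

        time.Sleep(1200 * time.Millisecond)
        // Reading the expired key deletes it and reports exists=false.
        if _, exists, _ := store.GetByKey("a"); !exists {
            fmt.Println("expired")
        }
    }
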
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go b/metrics-server/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go
new file mode 100644
index 0000000..a096765
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"k8s.io/apimachinery/pkg/util/clock"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+type fakeThreadSafeMap struct {
+	ThreadSafeStore
+	deletedKeys chan<- string
+}
+
+func (c *fakeThreadSafeMap) Delete(key string) {
+	if c.deletedKeys != nil {
+		c.ThreadSafeStore.Delete(key)
+		c.deletedKeys <- key
+	}
+}
+
+type FakeExpirationPolicy struct {
+	NeverExpire     sets.String
+	RetrieveKeyFunc KeyFunc
+}
+
+func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool {
+	key, _ := p.RetrieveKeyFunc(obj)
+	return !p.NeverExpire.Has(key)
+}
+
+func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock clock.Clock) Store {
+	cacheStorage := NewThreadSafeStore(Indexers{}, Indices{})
+	return &ExpirationCache{
+		cacheStorage:     &fakeThreadSafeMap{cacheStorage, deletedKeys},
+		keyFunc:          keyFunc,
+		clock:            cacheClock,
+		expirationPolicy: expirationPolicy,
+	}
+}
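
A sketch of how these fakes compose in a test. It would have to live in package
cache itself, since FakeExpirationPolicy's RetrieveKeyFunc is handed the
unexported *timestampedEntry wrapper:

    package cache

    import (
        "testing"
        "time"

        "k8s.io/apimachinery/pkg/util/clock"
        "k8s.io/apimachinery/pkg/util/sets"
    )

    func TestFakeExpiration(t *testing.T) {
        keyFunc := func(obj interface{}) (string, error) { return obj.(string), nil }
        policy := &FakeExpirationPolicy{
            NeverExpire: sets.NewString("keep"),
            // The policy sees the internal timestampedEntry, not the raw object.
            RetrieveKeyFunc: func(obj interface{}) (string, error) {
                return obj.(*timestampedEntry).obj.(string), nil
            },
        }
        deleted := make(chan string, 1)
        store := NewFakeExpirationStore(keyFunc, deleted, policy, clock.NewFakeClock(time.Now()))

        store.Add("keep")
        store.Add("drop")
        if _, exists, _ := store.GetByKey("keep"); !exists {
            t.Fatal("expected 'keep' to survive")
        }
        if _, exists, _ := store.GetByKey("drop"); exists {
            t.Fatal("expected 'drop' to be expired")
        }
        // fakeThreadSafeMap reports the lazy deletion on the channel.
        if k := <-deleted; k != "drop" {
            t.Fatalf("unexpected deleted key %q", k)
        }
    }
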
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go b/metrics-server/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go
new file mode 100644
index 0000000..8d71c24
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+// FakeCustomStore lets you define custom functions for store operations.
+type FakeCustomStore struct {
+	AddFunc      func(obj interface{}) error
+	UpdateFunc   func(obj interface{}) error
+	DeleteFunc   func(obj interface{}) error
+	ListFunc     func() []interface{}
+	ListKeysFunc func() []string
+	GetFunc      func(obj interface{}) (item interface{}, exists bool, err error)
+	GetByKeyFunc func(key string) (item interface{}, exists bool, err error)
+	ReplaceFunc  func(list []interface{}, resourceVersion string) error
+	ResyncFunc   func() error
+}
+
+// Add calls the custom Add function if defined
+func (f *FakeCustomStore) Add(obj interface{}) error {
+	if f.AddFunc != nil {
+		return f.AddFunc(obj)
+	}
+	return nil
+}
+
+// Update calls the custom Update function if defined
+func (f *FakeCustomStore) Update(obj interface{}) error {
+	if f.UpdateFunc != nil {
+		return f.UpdateFunc(obj)
+	}
+	return nil
+}
+
+// Delete calls the custom Delete function if defined
+func (f *FakeCustomStore) Delete(obj interface{}) error {
+	if f.DeleteFunc != nil {
+		return f.DeleteFunc(obj)
+	}
+	return nil
+}
+
+// List calls the custom List function if defined
+func (f *FakeCustomStore) List() []interface{} {
+	if f.ListFunc != nil {
+		return f.ListFunc()
+	}
+	return nil
+}
+
+// ListKeys calls the custom ListKeys function if defined
+func (f *FakeCustomStore) ListKeys() []string {
+	if f.ListKeysFunc != nil {
+		return f.ListKeysFunc()
+	}
+	return nil
+}
+
+// Get calls the custom Get function if defined
+func (f *FakeCustomStore) Get(obj interface{}) (item interface{}, exists bool, err error) {
+	if f.GetFunc != nil {
+		return f.GetFunc(obj)
+	}
+	return nil, false, nil
+}
+
+// GetByKey calls the custom GetByKey function if defined
+func (f *FakeCustomStore) GetByKey(key string) (item interface{}, exists bool, err error) {
+	if f.GetByKeyFunc != nil {
+		return f.GetByKeyFunc(key)
+	}
+	return nil, false, nil
+}
+
+// Replace calls the custom Replace function if defined
+func (f *FakeCustomStore) Replace(list []interface{}, resourceVersion string) error {
+	if f.ReplaceFunc != nil {
+		return f.ReplaceFunc(list, resourceVersion)
+	}
+	return nil
+}
+
+// Resync calls the custom Resync function if defined
+func (f *FakeCustomStore) Resync() error {
+	if f.ResyncFunc != nil {
+		return f.ResyncFunc()
+	}
+	return nil
+}
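
A sketch of stubbing only the operations a test cares about; every other method
falls through to harmless zero-value returns:

    package main

    import (
        "fmt"

        "k8s.io/client-go/tools/cache"
    )

    func main() {
        adds := 0
        store := &cache.FakeCustomStore{
            AddFunc: func(obj interface{}) error { adds++; return nil },
            GetByKeyFunc: func(key string) (interface{}, bool, error) {
                return "canned-" + key, true, nil
            },
        }

        var s cache.Store = store // FakeCustomStore satisfies Store
        s.Add("anything")
        obj, exists, _ := s.GetByKey("x")
        fmt.Println(adds, exists, obj) // 1 true canned-x
    }
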
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/fifo.go b/metrics-server/vendor/k8s.io/client-go/tools/cache/fifo.go
new file mode 100644
index 0000000..e05c01e
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/fifo.go
@@ -0,0 +1,358 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"errors"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// PopProcessFunc is passed to the Pop() method of the Queue interface.
+// It is supposed to process the element popped from the queue.
+type PopProcessFunc func(interface{}) error
+
+// ErrRequeue may be returned by a PopProcessFunc to safely requeue
+// the current item. The value of Err will be returned from Pop.
+type ErrRequeue struct {
+	// Err is returned by the Pop function
+	Err error
+}
+
+var FIFOClosedError error = errors.New("DeltaFIFO: manipulating with closed queue")
+
+func (e ErrRequeue) Error() string {
+	if e.Err == nil {
+		return "the popped item should be requeued without returning an error"
+	}
+	return e.Err.Error()
+}
+
+// Queue is exactly like a Store, but has a Pop() method too.
+type Queue interface {
+	Store
+
+	// Pop blocks until it has something to process.
+	// It returns the object that was processed and the result of processing.
+	// The PopProcessFunc may return an ErrRequeue{...} to indicate the item
+	// should be requeued before releasing the lock on the queue.
+	Pop(PopProcessFunc) (interface{}, error)
+
+	// AddIfNotPresent adds a value previously
+	// returned by Pop back into the queue as long
+	// as nothing else (presumably more recent)
+	// has since been added.
+	AddIfNotPresent(interface{}) error
+
+	// HasSynced returns true if the first batch of items has been popped
+	HasSynced() bool
+
+	// Close queue
+	Close()
+}
+
+// Pop is a helper function for popping from Queue.
+// WARNING: Do NOT use this function in non-test code to avoid races
+// unless you really really really really know what you are doing.
+func Pop(queue Queue) interface{} {
+	var result interface{}
+	queue.Pop(func(obj interface{}) error {
+		result = obj
+		return nil
+	})
+	return result
+}
+
+// FIFO receives adds and updates from a Reflector, and puts them in a queue for
+// FIFO order processing. If multiple adds/updates of a single item happen while
+// an item is in the queue before it has been processed, it will only be
+// processed once, and when it is processed, the most recent version will be
+// processed. This can't be done with a channel.
+//
+// FIFO solves this use case:
+//  * You want to process every object (exactly) once.
+//  * You want to process the most recent version of the object when you process it.
+//  * You do not want to process deleted objects, they should be removed from the queue.
+//  * You do not want to periodically reprocess objects.
+// Compare with DeltaFIFO for other use cases.
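+//
+// A minimal usage sketch (illustrative only; "pod" and "process" are
+// hypothetical, and error handling is elided):
+//
+//	fifo := NewFIFO(MetaNamespaceKeyFunc)
+//	fifo.Add(pod)
+//	fifo.Pop(func(obj interface{}) error {
+//		return process(obj) // always sees the most recent version
+//	})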
+type FIFO struct {
+	lock sync.RWMutex
+	cond sync.Cond
+	// We depend on the property that items in the set are in the queue and vice versa.
+	items map[string]interface{}
+	queue []string
+
+	// populated is true if the first batch of items has been inserted by Replace(),
+	// or if Delete/Add/Update was called first.
+	populated bool
+	// initialPopulationCount is the number of items inserted by the first call of Replace()
+	initialPopulationCount int
+
+	// keyFunc is used to make the key used for queued item insertion and retrieval, and
+	// should be deterministic.
+	keyFunc KeyFunc
+
+	// Indication the queue is closed.
+	// Used to indicate a queue is closed so a control loop can exit when a queue is empty.
+	// Currently, not used to gate any of the CRUD operations.
+	closed     bool
+	closedLock sync.Mutex
+}
+
+var (
+	_ = Queue(&FIFO{}) // FIFO is a Queue
+)
+
+// Close the queue.
+func (f *FIFO) Close() {
+	f.closedLock.Lock()
+	defer f.closedLock.Unlock()
+	f.closed = true
+	f.cond.Broadcast()
+}
+
+// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent was called first,
+// or if the first batch of items inserted by Replace() has been popped.
+func (f *FIFO) HasSynced() bool {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	return f.populated && f.initialPopulationCount == 0
+}
+
+// Add inserts an item, and puts it in the queue. The item is only enqueued
+// if it doesn't already exist in the set.
+func (f *FIFO) Add(obj interface{}) error {
+	id, err := f.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.populated = true
+	if _, exists := f.items[id]; !exists {
+		f.queue = append(f.queue, id)
+	}
+	f.items[id] = obj
+	f.cond.Broadcast()
+	return nil
+}
+
+// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already
+// present in the set, it is neither enqueued nor added to the set.
+//
+// This is useful in a single producer/consumer scenario so that the consumer can
+// safely retry items without contending with the producer and potentially enqueueing
+// stale items.
+func (f *FIFO) AddIfNotPresent(obj interface{}) error {
+	id, err := f.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.addIfNotPresent(id, obj)
+	return nil
+}
+
+// addIfNotPresent assumes the fifo lock is already held and adds the provided
+// item to the queue under id if it does not already exist.
+func (f *FIFO) addIfNotPresent(id string, obj interface{}) {
+	f.populated = true
+	if _, exists := f.items[id]; exists {
+		return
+	}
+
+	f.queue = append(f.queue, id)
+	f.items[id] = obj
+	f.cond.Broadcast()
+}
+
+// Update is the same as Add in this implementation.
+func (f *FIFO) Update(obj interface{}) error {
+	return f.Add(obj)
+}
+
+// Delete removes an item. It doesn't add it to the queue, because
+// this implementation assumes the consumer only cares about the objects,
+// not the order in which they were created/added.
+func (f *FIFO) Delete(obj interface{}) error {
+	id, err := f.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.populated = true
+	delete(f.items, id)
+	return nil
+}
+
+// List returns a list of all the items.
+func (f *FIFO) List() []interface{} {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	list := make([]interface{}, 0, len(f.items))
+	for _, item := range f.items {
+		list = append(list, item)
+	}
+	return list
+}
+
+// ListKeys returns a list of all the keys of the objects currently
+// in the FIFO.
+func (f *FIFO) ListKeys() []string {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	list := make([]string, 0, len(f.items))
+	for key := range f.items {
+		list = append(list, key)
+	}
+	return list
+}
+
+// Get returns the requested item, or sets exists=false.
+func (f *FIFO) Get(obj interface{}) (item interface{}, exists bool, err error) {
+	key, err := f.keyFunc(obj)
+	if err != nil {
+		return nil, false, KeyError{obj, err}
+	}
+	return f.GetByKey(key)
+}
+
+// GetByKey returns the requested item, or sets exists=false.
+func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	item, exists = f.items[key]
+	return item, exists, nil
+}
+
+// IsClosed returns true if the queue is closed.
+func (f *FIFO) IsClosed() bool {
+	f.closedLock.Lock()
+	defer f.closedLock.Unlock()
+	return f.closed
+}
+
+// Pop waits until an item is ready and processes it. If multiple items are
+// ready, they are returned in the order in which they were added/updated.
+// The item is removed from the queue (and the store) before it is processed,
+// so if you don't successfully process it, it should be added back with
+// AddIfNotPresent(). The process function is called under lock, so it is safe
+// to update data structures in it that need to be in sync with the queue.
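+//
+// For example, a consumer loop that requeues items on transient errors might
+// look like this (an illustrative sketch; "process" is a hypothetical handler):
+//
+//	for {
+//		_, err := f.Pop(func(obj interface{}) error {
+//			if err := process(obj); err != nil {
+//				return ErrRequeue{Err: err} // put the item back in the queue
+//			}
+//			return nil
+//		})
+//		if err == FIFOClosedError {
+//			return
+//		}
+//	}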
+func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	for {
+		for len(f.queue) == 0 {
+			// When the queue is empty, invocation of Pop() is blocked until a new item is enqueued.
+			// When Close() is called, f.closed is set and the condition is broadcast,
+			// which causes this loop to continue and return from the Pop().
+			if f.IsClosed() {
+				return nil, FIFOClosedError
+			}
+
+			f.cond.Wait()
+		}
+		id := f.queue[0]
+		f.queue = f.queue[1:]
+		if f.initialPopulationCount > 0 {
+			f.initialPopulationCount--
+		}
+		item, ok := f.items[id]
+		if !ok {
+			// Item may have been deleted subsequently.
+			continue
+		}
+		delete(f.items, id)
+		err := process(item)
+		if e, ok := err.(ErrRequeue); ok {
+			f.addIfNotPresent(id, item)
+			err = e.Err
+		}
+		return item, err
+	}
+}
+
+// Replace will delete the contents of 'f', using instead the given map.
+// 'f' takes ownership of the map; you should not reference the map again
+// after calling this function. f's queue is reset, too; upon return, it
+// will contain the items in the map, in no particular order.
+func (f *FIFO) Replace(list []interface{}, resourceVersion string) error {
+	items := map[string]interface{}{}
+	for _, item := range list {
+		key, err := f.keyFunc(item)
+		if err != nil {
+			return KeyError{item, err}
+		}
+		items[key] = item
+	}
+
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if !f.populated {
+		f.populated = true
+		f.initialPopulationCount = len(items)
+	}
+
+	f.items = items
+	f.queue = f.queue[:0]
+	for id := range items {
+		f.queue = append(f.queue, id)
+	}
+	if len(f.queue) > 0 {
+		f.cond.Broadcast()
+	}
+	return nil
+}
+
+// Resync will touch all objects to put them into the processing queue
+func (f *FIFO) Resync() error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	inQueue := sets.NewString()
+	for _, id := range f.queue {
+		inQueue.Insert(id)
+	}
+	for id := range f.items {
+		if !inQueue.Has(id) {
+			f.queue = append(f.queue, id)
+		}
+	}
+	if len(f.queue) > 0 {
+		f.cond.Broadcast()
+	}
+	return nil
+}
+
+// NewFIFO returns a Store which can be used to queue up items to
+// process.
+func NewFIFO(keyFunc KeyFunc) *FIFO {
+	f := &FIFO{
+		items:   map[string]interface{}{},
+		queue:   []string{},
+		keyFunc: keyFunc,
+	}
+	f.cond.L = &f.lock
+	return f
+}
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/heap.go b/metrics-server/vendor/k8s.io/client-go/tools/cache/heap.go
new file mode 100644
index 0000000..78e4924
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/heap.go
@@ -0,0 +1,323 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file implements a heap data structure.
+
+package cache
+
+import (
+	"container/heap"
+	"fmt"
+	"sync"
+)
+
+const (
+	closedMsg = "heap is closed"
+)
+
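+// LessFunc reports whether its first argument should sort before its second;
+// Heap uses it to order the items it stores.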
+type LessFunc func(interface{}, interface{}) bool
+type heapItem struct {
+	obj   interface{} // The object which is stored in the heap.
+	index int         // The index of the object's key in the Heap.queue.
+}
+
+type itemKeyValue struct {
+	key string
+	obj interface{}
+}
+
+// heapData is an internal struct that implements the standard heap interface
+// and keeps the data stored in the heap.
+type heapData struct {
+	// items is a map from key of the objects to the objects and their index.
+	// We depend on the property that items in the map are in the queue and vice versa.
+	items map[string]*heapItem
+	// queue implements a heap data structure and keeps the order of elements
+	// according to the heap invariant. The queue keeps the keys of objects stored
+	// in "items".
+	queue []string
+
+	// keyFunc is used to make the key used for queued item insertion and retrieval, and
+	// should be deterministic.
+	keyFunc KeyFunc
+	// lessFunc is used to compare two objects in the heap.
+	lessFunc LessFunc
+}
+
+var (
+	_ = heap.Interface(&heapData{}) // heapData is a standard heap
+)
+
+// Less compares two objects and returns true if the first one should go
+// in front of the second one in the heap.
+func (h *heapData) Less(i, j int) bool {
+	if i >= len(h.queue) || j >= len(h.queue) {
+		return false
+	}
+	itemi, ok := h.items[h.queue[i]]
+	if !ok {
+		return false
+	}
+	itemj, ok := h.items[h.queue[j]]
+	if !ok {
+		return false
+	}
+	return h.lessFunc(itemi.obj, itemj.obj)
+}
+
+// Len returns the number of items in the Heap.
+func (h *heapData) Len() int { return len(h.queue) }
+
+// Swap implements swapping of two elements in the heap. This is a part of the standard
+// heap interface and should never be called directly.
+func (h *heapData) Swap(i, j int) {
+	h.queue[i], h.queue[j] = h.queue[j], h.queue[i]
+	item := h.items[h.queue[i]]
+	item.index = i
+	item = h.items[h.queue[j]]
+	item.index = j
+}
+
+// Push is supposed to be called by heap.Push only.
+func (h *heapData) Push(kv interface{}) {
+	keyValue := kv.(*itemKeyValue)
+	n := len(h.queue)
+	h.items[keyValue.key] = &heapItem{keyValue.obj, n}
+	h.queue = append(h.queue, keyValue.key)
+}
+
+// Pop is supposed to be called by heap.Pop only.
+func (h *heapData) Pop() interface{} {
+	key := h.queue[len(h.queue)-1]
+	h.queue = h.queue[0 : len(h.queue)-1]
+	item, ok := h.items[key]
+	if !ok {
+		// This is an error
+		return nil
+	}
+	delete(h.items, key)
+	return item.obj
+}
+
+// Heap is a thread-safe producer/consumer queue that implements a heap data structure.
+// It can be used to implement priority queues and similar data structures.
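+//
+// For example, a queue ordered by a per-item priority could be built as follows
+// (an illustrative sketch; "task", its fields, "task1", and "taskKeyFunc" are
+// hypothetical):
+//
+//	h := NewHeap(taskKeyFunc, func(a, b interface{}) bool {
+//		return a.(*task).priority < b.(*task).priority
+//	})
+//	h.Add(task1)
+//	next, err := h.Pop() // blocks until an item is available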
+type Heap struct {
+	lock sync.RWMutex
+	cond sync.Cond
+
+	// data stores objects and has a queue that keeps their ordering according
+	// to the heap invariant.
+	data *heapData
+
+	// closed indicates that the queue is closed.
+	// It is mainly used to let Pop() exit its control loop while waiting for an item.
+	closed bool
+}
+
+// Close closes the Heap and signals condition variables that may be waiting to pop
+// items from the heap.
+func (h *Heap) Close() {
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	h.closed = true
+	h.cond.Broadcast()
+}
+
+// Add inserts an item, and puts it in the queue. The item is updated if it
+// already exists.
+func (h *Heap) Add(obj interface{}) error {
+	key, err := h.data.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	if h.closed {
+		return fmt.Errorf(closedMsg)
+	}
+	if _, exists := h.data.items[key]; exists {
+		h.data.items[key].obj = obj
+		heap.Fix(h.data, h.data.items[key].index)
+	} else {
+		h.addIfNotPresentLocked(key, obj)
+	}
+	h.cond.Broadcast()
+	return nil
+}
+
+// BulkAdd adds all the items in the list to the queue and then signals the condition
+// variable. It is useful when the caller would like to add all of the items
+// to the queue before the consumer starts processing them.
+func (h *Heap) BulkAdd(list []interface{}) error {
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	if h.closed {
+		return fmt.Errorf(closedMsg)
+	}
+	for _, obj := range list {
+		key, err := h.data.keyFunc(obj)
+		if err != nil {
+			return KeyError{obj, err}
+		}
+		if _, exists := h.data.items[key]; exists {
+			h.data.items[key].obj = obj
+			heap.Fix(h.data, h.data.items[key].index)
+		} else {
+			h.addIfNotPresentLocked(key, obj)
+		}
+	}
+	h.cond.Broadcast()
+	return nil
+}
+
+// AddIfNotPresent inserts an item, and puts it in the queue. If an item with
+// the key is present in the map, no change is made to the item.
+//
+// This is useful in a single producer/consumer scenario so that the consumer can
+// safely retry items without contending with the producer and potentially enqueueing
+// stale items.
+func (h *Heap) AddIfNotPresent(obj interface{}) error {
+	id, err := h.data.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	if h.closed {
+		return fmt.Errorf(closedMsg)
+	}
+	h.addIfNotPresentLocked(id, obj)
+	h.cond.Broadcast()
+	return nil
+}
+
+// addIfNotPresentLocked assumes the lock is already held and adds the provided
+// item to the queue if it does not already exist.
+func (h *Heap) addIfNotPresentLocked(key string, obj interface{}) {
+	if _, exists := h.data.items[key]; exists {
+		return
+	}
+	heap.Push(h.data, &itemKeyValue{key, obj})
+}
+
+// Update is the same as Add in this implementation. When the item does not
+// exist, it is added.
+func (h *Heap) Update(obj interface{}) error {
+	return h.Add(obj)
+}
+
+// Delete removes an item.
+func (h *Heap) Delete(obj interface{}) error {
+	key, err := h.data.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	if item, ok := h.data.items[key]; ok {
+		heap.Remove(h.data, item.index)
+		return nil
+	}
+	return fmt.Errorf("object not found")
+}
+
+// Pop waits until an item is ready. If multiple items are
+// ready, they are returned in the order given by Heap.data.lessFunc.
+func (h *Heap) Pop() (interface{}, error) {
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	for len(h.data.queue) == 0 {
+		// When the queue is empty, invocation of Pop() is blocked until new item is enqueued.
+		// When Close() is called, the h.closed is set and the condition is broadcast,
+		// which causes this loop to continue and return from the Pop().
+		if h.closed {
+			return nil, fmt.Errorf("heap is closed")
+		}
+		h.cond.Wait()
+	}
+	obj := heap.Pop(h.data)
+	if obj == nil {
+		return nil, fmt.Errorf("object was removed from heap data")
+	}
+	return obj, nil
+}
+
+// List returns a list of all the items.
+func (h *Heap) List() []interface{} {
+	h.lock.RLock()
+	defer h.lock.RUnlock()
+	list := make([]interface{}, 0, len(h.data.items))
+	for _, item := range h.data.items {
+		list = append(list, item.obj)
+	}
+	return list
+}
+
+// ListKeys returns a list of all the keys of the objects currently in the Heap.
+func (h *Heap) ListKeys() []string {
+	h.lock.RLock()
+	defer h.lock.RUnlock()
+	list := make([]string, 0, len(h.data.items))
+	for key := range h.data.items {
+		list = append(list, key)
+	}
+	return list
+}
+
+// Get returns the requested item, or sets exists=false.
+func (h *Heap) Get(obj interface{}) (interface{}, bool, error) {
+	key, err := h.data.keyFunc(obj)
+	if err != nil {
+		return nil, false, KeyError{obj, err}
+	}
+	return h.GetByKey(key)
+}
+
+// GetByKey returns the requested item, or sets exists=false.
+func (h *Heap) GetByKey(key string) (interface{}, bool, error) {
+	h.lock.RLock()
+	defer h.lock.RUnlock()
+	item, exists := h.data.items[key]
+	if !exists {
+		return nil, false, nil
+	}
+	return item.obj, true, nil
+}
+
+// IsClosed returns true if the queue is closed.
+func (h *Heap) IsClosed() bool {
+	h.lock.RLock()
+	defer h.lock.RUnlock()
+	return h.closed
+}
+
+// NewHeap returns a Heap which can be used to queue up items to process.
+func NewHeap(keyFn KeyFunc, lessFn LessFunc) *Heap {
+	h := &Heap{
+		data: &heapData{
+			items:    map[string]*heapItem{},
+			queue:    []string{},
+			keyFunc:  keyFn,
+			lessFunc: lessFn,
+		},
+	}
+	h.cond.L = &h.lock
+	return h
+}
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/index.go b/metrics-server/vendor/k8s.io/client-go/tools/cache/index.go
new file mode 100644
index 0000000..15acb16
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/index.go
@@ -0,0 +1,87 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// Indexer is a storage interface that lets you list objects using multiple indexing functions
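+//
+// For example (an illustrative sketch using the namespace index defined below;
+// "pod" is hypothetical):
+//
+//	indexer := NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc})
+//	indexer.Add(pod)
+//	objs, err := indexer.ByIndex(NamespaceIndex, "kube-system")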
+type Indexer interface {
+	Store
+	// Index returns a list of objects that match on the named indexing function.
+	Index(indexName string, obj interface{}) ([]interface{}, error)
+	// IndexKeys returns the set of keys that match on the named indexing function.
+	IndexKeys(indexName, indexKey string) ([]string, error)
+	// ListIndexFuncValues returns the list of generated values of an Index func
+	ListIndexFuncValues(indexName string) []string
+	// ByIndex lists the objects that match on the named indexing function with the exact key.
+	ByIndex(indexName, indexKey string) ([]interface{}, error)
+	// GetIndexers returns the indexers.
+	GetIndexers() Indexers
+
+	// AddIndexers adds more indexers to this store.  If you call this after you already have data
+	// in the store, the results are undefined.
+	AddIndexers(newIndexers Indexers) error
+}
+
+// IndexFunc knows how to provide an indexed value for an object.
+type IndexFunc func(obj interface{}) ([]string, error)
+
+// IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc.  This is only useful if your index function returns
+// unique values for every object.  This conversion can create errors when more than one key is found.  You
+// should prefer to make proper key and index functions.
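+//
+// For example (illustrative; "uidIndexFunc" is a hypothetical index function
+// that returns exactly one UID per object):
+//
+//	keyFunc := IndexFuncToKeyFuncAdapter(uidIndexFunc)
+//	store := NewStore(keyFunc)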
+func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc {
+	return func(obj interface{}) (string, error) {
+		indexKeys, err := indexFunc(obj)
+		if err != nil {
+			return "", err
+		}
+		if len(indexKeys) > 1 {
+			return "", fmt.Errorf("too many keys: %v", indexKeys)
+		}
+		if len(indexKeys) == 0 {
+			return "", fmt.Errorf("unexpected empty indexKeys")
+		}
+		return indexKeys[0], nil
+	}
+}
+
+const (
+	NamespaceIndex string = "namespace"
+)
+
+// MetaNamespaceIndexFunc is a default index function that indexes based on an object's namespace
+func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) {
+	meta, err := meta.Accessor(obj)
+	if err != nil {
+		return []string{""}, fmt.Errorf("object has no meta: %v", err)
+	}
+	return []string{meta.GetNamespace()}, nil
+}
+
+// Index maps the indexed value to a set of keys in the store that match on that value
+type Index map[string]sets.String
+
+// Indexers maps a name to an IndexFunc
+type Indexers map[string]IndexFunc
+
+// Indices maps a name to an Index
+type Indices map[string]Index
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/listers.go b/metrics-server/vendor/k8s.io/client-go/tools/cache/listers.go
new file mode 100644
index 0000000..27d51a6
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/listers.go
@@ -0,0 +1,160 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"github.com/golang/glog"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// AppendFunc is used to add a matching item to whatever list the caller is using
+type AppendFunc func(interface{})
+
+func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
+	for _, m := range store.List() {
+		metadata, err := meta.Accessor(m)
+		if err != nil {
+			return err
+		}
+		if selector.Matches(labels.Set(metadata.GetLabels())) {
+			appendFn(m)
+		}
+	}
+	return nil
+}
+
+func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error {
+	if namespace == metav1.NamespaceAll {
+		for _, m := range indexer.List() {
+			metadata, err := meta.Accessor(m)
+			if err != nil {
+				return err
+			}
+			if selector.Matches(labels.Set(metadata.GetLabels())) {
+				appendFn(m)
+			}
+		}
+		return nil
+	}
+
+	items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace})
+	if err != nil {
+		// Ignore error; do slow search without index.
+		glog.Warningf("can not retrieve list of objects using index : %v", err)
+		for _, m := range indexer.List() {
+			metadata, err := meta.Accessor(m)
+			if err != nil {
+				return err
+			}
+			if metadata.GetNamespace() == namespace && selector.Matches(labels.Set(metadata.GetLabels())) {
+				appendFn(m)
+			}
+
+		}
+		return nil
+	}
+	for _, m := range items {
+		metadata, err := meta.Accessor(m)
+		if err != nil {
+			return err
+		}
+		if selector.Matches(labels.Set(metadata.GetLabels())) {
+			appendFn(m)
+		}
+	}
+
+	return nil
+}
+
+// GenericLister is a lister skin on a generic Indexer
+type GenericLister interface {
+	// List will return all objects across namespaces
+	List(selector labels.Selector) (ret []runtime.Object, err error)
+	// Get will attempt to retrieve assuming that name==key
+	Get(name string) (runtime.Object, error)
+	// ByNamespace will give you a GenericNamespaceLister for one namespace
+	ByNamespace(namespace string) GenericNamespaceLister
+}
+
+// GenericNamespaceLister is a lister skin on a generic Indexer
+type GenericNamespaceLister interface {
+	// List will return all objects in this namespace
+	List(selector labels.Selector) (ret []runtime.Object, err error)
+	// Get will attempt to retrieve by namespace and name
+	Get(name string) (runtime.Object, error)
+}
+
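+// NewGenericLister creates a GenericLister over the given Indexer; keys are
+// assumed to follow the namespace/name form produced by MetaNamespaceKeyFunc.
+// For example (an illustrative sketch; "indexer" is assumed to be namespace-indexed):
+//
+//	lister := NewGenericLister(indexer, schema.GroupResource{Resource: "pods"})
+//	obj, err := lister.ByNamespace("default").Get("my-pod")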
+func NewGenericLister(indexer Indexer, resource schema.GroupResource) GenericLister {
+	return &genericLister{indexer: indexer, resource: resource}
+}
+
+type genericLister struct {
+	indexer  Indexer
+	resource schema.GroupResource
+}
+
+func (s *genericLister) List(selector labels.Selector) (ret []runtime.Object, err error) {
+	err = ListAll(s.indexer, selector, func(m interface{}) {
+		ret = append(ret, m.(runtime.Object))
+	})
+	return ret, err
+}
+
+func (s *genericLister) ByNamespace(namespace string) GenericNamespaceLister {
+	return &genericNamespaceLister{indexer: s.indexer, namespace: namespace, resource: s.resource}
+}
+
+func (s *genericLister) Get(name string) (runtime.Object, error) {
+	obj, exists, err := s.indexer.GetByKey(name)
+	if err != nil {
+		return nil, err
+	}
+	if !exists {
+		return nil, errors.NewNotFound(s.resource, name)
+	}
+	return obj.(runtime.Object), nil
+}
+
+type genericNamespaceLister struct {
+	indexer   Indexer
+	namespace string
+	resource  schema.GroupResource
+}
+
+func (s *genericNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) {
+	err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+		ret = append(ret, m.(runtime.Object))
+	})
+	return ret, err
+}
+
+func (s *genericNamespaceLister) Get(name string) (runtime.Object, error) {
+	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+	if err != nil {
+		return nil, err
+	}
+	if !exists {
+		return nil, errors.NewNotFound(s.resource, name)
+	}
+	return obj.(runtime.Object), nil
+}
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/listwatch.go b/metrics-server/vendor/k8s.io/client-go/tools/cache/listwatch.go
new file mode 100644
index 0000000..8bf41f5
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/listwatch.go
@@ -0,0 +1,187 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"context"
+	"time"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/watch"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/pager"
+)
+
+// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource.
+type ListerWatcher interface {
+	// List should return a list type object; the Items field will be extracted, and the
+	// ResourceVersion field will be used to start the watch in the right place.
+	List(options metav1.ListOptions) (runtime.Object, error)
+	// Watch should begin a watch at the specified version.
+	Watch(options metav1.ListOptions) (watch.Interface, error)
+}
+
+// ListFunc knows how to list resources
+type ListFunc func(options metav1.ListOptions) (runtime.Object, error)
+
+// WatchFunc knows how to watch resources
+type WatchFunc func(options metav1.ListOptions) (watch.Interface, error)
+
+// ListWatch knows how to list and watch a set of apiserver resources.  It satisfies the ListerWatcher interface.
+// It is a convenience struct for users of NewReflector, etc.
+// ListFunc and WatchFunc must not be nil.
+type ListWatch struct {
+	ListFunc  ListFunc
+	WatchFunc WatchFunc
+	// DisableChunking requests no chunking for this list watcher.
+	DisableChunking bool
+}
+
+// Getter interface knows how to access Get method from RESTClient.
+type Getter interface {
+	Get() *restclient.Request
+}
+
+// NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector.
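+// For example (an illustrative sketch; "coreV1RESTClient" is an assumed REST
+// client for the core v1 API group, e.g. from a typed clientset):
+//
+//	lw := NewListWatchFromClient(coreV1RESTClient, "pods", metav1.NamespaceAll, fields.Everything())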
+func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSelector fields.Selector) *ListWatch {
+	optionsModifier := func(options *metav1.ListOptions) {
+		options.FieldSelector = fieldSelector.String()
+	}
+	return NewFilteredListWatchFromClient(c, resource, namespace, optionsModifier)
+}
+
+// NewFilteredListWatchFromClient creates a new ListWatch from the specified client, resource, namespace, and option modifier.
+// The options modifier is a function that takes a ListOptions and modifies it. Provide a customized
+// modifier function to apply a field selector, a label selector, or any other desired options.
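+// For example (illustrative; the label selector value is hypothetical):
+//
+//	lw := NewFilteredListWatchFromClient(c, "pods", "default", func(options *metav1.ListOptions) {
+//		options.LabelSelector = "app=frontend"
+//	})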
+func NewFilteredListWatchFromClient(c Getter, resource string, namespace string, optionsModifier func(options *metav1.ListOptions)) *ListWatch {
+	listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
+		optionsModifier(&options)
+		return c.Get().
+			Namespace(namespace).
+			Resource(resource).
+			VersionedParams(&options, metav1.ParameterCodec).
+			Do().
+			Get()
+	}
+	watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
+		options.Watch = true
+		optionsModifier(&options)
+		return c.Get().
+			Namespace(namespace).
+			Resource(resource).
+			VersionedParams(&options, metav1.ParameterCodec).
+			Watch()
+	}
+	return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
+}
+
+func timeoutFromListOptions(options metav1.ListOptions) time.Duration {
+	if options.TimeoutSeconds != nil {
+		return time.Duration(*options.TimeoutSeconds) * time.Second
+	}
+	return 0
+}
+
+// List a set of apiserver resources
+func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) {
+	if !lw.DisableChunking {
+		return pager.New(pager.SimplePageFunc(lw.ListFunc)).List(context.TODO(), options)
+	}
+	return lw.ListFunc(options)
+}
+
+// Watch a set of apiserver resources
+func (lw *ListWatch) Watch(options metav1.ListOptions) (watch.Interface, error) {
+	return lw.WatchFunc(options)
+}
+
+// ListWatchUntil checks the provided conditions against the items returned by the list watcher, returning wait.ErrWaitTimeout
+// if timeout is exceeded without all conditions returning true, or an error if an error occurs.
+// TODO: check for watch expired error and retry watch from latest point?  Same issue exists for Until.
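+//
+// For example (an illustrative sketch; assumes lw lists and watches pods, with
+// v1 being k8s.io/api/core/v1):
+//
+//	event, err := ListWatchUntil(time.Minute, lw, func(e watch.Event) (bool, error) {
+//		pod, ok := e.Object.(*v1.Pod)
+//		return ok && pod.Status.Phase == v1.PodRunning, nil
+//	})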
+func ListWatchUntil(timeout time.Duration, lw ListerWatcher, conditions ...watch.ConditionFunc) (*watch.Event, error) {
+	if len(conditions) == 0 {
+		return nil, nil
+	}
+
+	list, err := lw.List(metav1.ListOptions{})
+	if err != nil {
+		return nil, err
+	}
+	initialItems, err := meta.ExtractList(list)
+	if err != nil {
+		return nil, err
+	}
+
+	// use the initial items as simulated "adds"
+	var lastEvent *watch.Event
+	currIndex := 0
+	passedConditions := 0
+	for _, condition := range conditions {
+		// check the next condition against the previous event and short circuit waiting for the next watch
+		if lastEvent != nil {
+			done, err := condition(*lastEvent)
+			if err != nil {
+				return lastEvent, err
+			}
+			if done {
+				passedConditions = passedConditions + 1
+				continue
+			}
+		}
+
+	ConditionSucceeded:
+		for currIndex < len(initialItems) {
+			lastEvent = &watch.Event{Type: watch.Added, Object: initialItems[currIndex]}
+			currIndex++
+
+			done, err := condition(*lastEvent)
+			if err != nil {
+				return lastEvent, err
+			}
+			if done {
+				passedConditions = passedConditions + 1
+				break ConditionSucceeded
+			}
+		}
+	}
+	if passedConditions == len(conditions) {
+		return lastEvent, nil
+	}
+	remainingConditions := conditions[passedConditions:]
+
+	metaObj, err := meta.ListAccessor(list)
+	if err != nil {
+		return nil, err
+	}
+	currResourceVersion := metaObj.GetResourceVersion()
+
+	watchInterface, err := lw.Watch(metav1.ListOptions{ResourceVersion: currResourceVersion})
+	if err != nil {
+		return nil, err
+	}
+
+	evt, err := watch.Until(timeout, watchInterface, remainingConditions...)
+	if err == watch.ErrWatchClosed {
+		// present a consistent error interface to callers
+		err = wait.ErrWaitTimeout
+	}
+	return evt, err
+}
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/mutation_cache.go b/metrics-server/vendor/k8s.io/client-go/tools/cache/mutation_cache.go
new file mode 100644
index 0000000..cbb6434
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/mutation_cache.go
@@ -0,0 +1,261 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"fmt"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/golang/glog"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+	utilcache "k8s.io/apimachinery/pkg/util/cache"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// MutationCache is able to take the results of update operations and store them in an LRU
+// that can be used to provide a more current view of a requested object.  It requires interpreting
+// resourceVersions for comparisons.
+// Implementations must be thread-safe.
+// TODO find a way to layer this into an informer/lister
+type MutationCache interface {
+	GetByKey(key string) (interface{}, bool, error)
+	ByIndex(indexName, indexKey string) ([]interface{}, error)
+	Mutation(interface{})
+}
+
+type ResourceVersionComparator interface {
+	CompareResourceVersion(lhs, rhs runtime.Object) int
+}
+
+// NewIntegerResourceVersionMutationCache returns a MutationCache that understands how to
+// deal with objects that have a resource version that:
+//
+//   - is an integer
+//   - increases when updated
+//   - is comparable across the same resource in a namespace
+//
+// Most backends will have these semantics. Indexer may be nil. ttl controls how long an item
+// remains in the mutation cache before it is removed.
+//
+// If includeAdds is true, objects in the mutation cache will be returned even if they don't exist
+// in the underlying store. This is only safe if your use of the cache can handle mutation entries
+// remaining in the cache for up to ttl when mutations and deletes occur very closely in time.
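+//
+// For example (an illustrative sketch; "store", "indexer", and "updatedPod" are
+// hypothetical):
+//
+//	mc := NewIntegerResourceVersionMutationCache(store, indexer, time.Minute, false)
+//	mc.Mutation(updatedPod)                   // record the result of an Update call
+//	obj, exists, err := mc.GetByKey("ns/pod") // sees the newer of the two copies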
+func NewIntegerResourceVersionMutationCache(backingCache Store, indexer Indexer, ttl time.Duration, includeAdds bool) MutationCache {
+	return &mutationCache{
+		backingCache:  backingCache,
+		indexer:       indexer,
+		mutationCache: utilcache.NewLRUExpireCache(100),
+		comparator:    etcdObjectVersioner{},
+		ttl:           ttl,
+		includeAdds:   includeAdds,
+	}
+}
+
+// mutationCache doesn't guarantee that it returns values added via Mutation since they can page out and
+// since you can't distinguish between, "didn't observe create" and "was deleted after create",
+// if the key is missing from the backing cache, we always return it as missing
+type mutationCache struct {
+	lock          sync.Mutex
+	backingCache  Store
+	indexer       Indexer
+	mutationCache *utilcache.LRUExpireCache
+	includeAdds   bool
+	ttl           time.Duration
+
+	comparator ResourceVersionComparator
+}
+
+// GetByKey is never guaranteed to return back the value set in Mutation.  It could be paged out, it could
+// be older than another copy, the backingCache may be more recent, or you might have written twice into the same key.
+// You get a value that was valid at some snapshot in time, and it will always be the newer of backingCache and mutationCache.
+func (c *mutationCache) GetByKey(key string) (interface{}, bool, error) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	obj, exists, err := c.backingCache.GetByKey(key)
+	if err != nil {
+		return nil, false, err
+	}
+	if !exists {
+		if !c.includeAdds {
+			// we can't distinguish between, "didn't observe create" and "was deleted after create", so
+			// if the key is missing, we always return it as missing
+			return nil, false, nil
+		}
+		obj, exists = c.mutationCache.Get(key)
+		if !exists {
+			return nil, false, nil
+		}
+	}
+	objRuntime, ok := obj.(runtime.Object)
+	if !ok {
+		return obj, true, nil
+	}
+	return c.newerObject(key, objRuntime), true, nil
+}
+
+// ByIndex returns the newer objects that match the provided index and indexer key.
+// Will return an error if no indexer was provided.
+func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, error) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if c.indexer == nil {
+		return nil, fmt.Errorf("no indexer has been provided to the mutation cache")
+	}
+	keys, err := c.indexer.IndexKeys(name, indexKey)
+	if err != nil {
+		return nil, err
+	}
+	var items []interface{}
+	keySet := sets.NewString()
+	for _, key := range keys {
+		keySet.Insert(key)
+		obj, exists, err := c.indexer.GetByKey(key)
+		if err != nil {
+			return nil, err
+		}
+		if !exists {
+			continue
+		}
+		if objRuntime, ok := obj.(runtime.Object); ok {
+			items = append(items, c.newerObject(key, objRuntime))
+		} else {
+			items = append(items, obj)
+		}
+	}
+
+	if c.includeAdds {
+		fn := c.indexer.GetIndexers()[name]
+		// Keys() returns keys from oldest to newest, so a full traversal does not alter the LRU behavior
+		for _, key := range c.mutationCache.Keys() {
+			updated, ok := c.mutationCache.Get(key)
+			if !ok {
+				continue
+			}
+			if keySet.Has(key.(string)) {
+				continue
+			}
+			elements, err := fn(updated)
+			if err != nil {
+				glog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err)
+				continue
+			}
+			for _, inIndex := range elements {
+				if inIndex != indexKey {
+					continue
+				}
+				items = append(items, updated)
+				break
+			}
+		}
+	}
+
+	return items, nil
+}
+
+// newerObject checks the mutation cache for a newer object and returns one if found. If the
+// mutated object is older than the backing object, it is removed from the mutation cache. Must be
+// called while the lock is held.
+func (c *mutationCache) newerObject(key string, backing runtime.Object) runtime.Object {
+	mutatedObj, exists := c.mutationCache.Get(key)
+	if !exists {
+		return backing
+	}
+	mutatedObjRuntime, ok := mutatedObj.(runtime.Object)
+	if !ok {
+		return backing
+	}
+	if c.comparator.CompareResourceVersion(backing, mutatedObjRuntime) >= 0 {
+		c.mutationCache.Remove(key)
+		return backing
+	}
+	return mutatedObjRuntime
+}
+
+// Mutation adds a change to the cache that can be returned in GetByKey if it is newer than the backingCache
+// copy.  If you call Mutation twice with the same object on different threads, one will win, but it's not defined
+// which one.  This doesn't affect correctness, since the GetByKey guarantee of "later of these two caches" is
+// preserved, but you may not get the version of the object you want.  The object you get is only guaranteed to be
+// "one that was valid at some point in time", not "the one that I want".
+func (c *mutationCache) Mutation(obj interface{}) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
+	if err != nil {
+		// this is a "nice to have", so failures shouldn't do anything weird
+		utilruntime.HandleError(err)
+		return
+	}
+
+	if objRuntime, ok := obj.(runtime.Object); ok {
+		if mutatedObj, exists := c.mutationCache.Get(key); exists {
+			if mutatedObjRuntime, ok := mutatedObj.(runtime.Object); ok {
+				if c.comparator.CompareResourceVersion(objRuntime, mutatedObjRuntime) < 0 {
+					return
+				}
+			}
+		}
+	}
+	c.mutationCache.Add(key, obj, c.ttl)
+}
+
+// etcdObjectVersioner implements versioning and extracting etcd node information
+// for objects that have an embedded ObjectMeta or ListMeta field.
+type etcdObjectVersioner struct{}
+
+// ObjectResourceVersion implements Versioner
+func (a etcdObjectVersioner) ObjectResourceVersion(obj runtime.Object) (uint64, error) {
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return 0, err
+	}
+	version := accessor.GetResourceVersion()
+	if len(version) == 0 {
+		return 0, nil
+	}
+	return strconv.ParseUint(version, 10, 64)
+}
+
+// CompareResourceVersion compares etcd resource versions.  Outside this API they are all strings,
+// but etcd resource versions are special: they're actually ints, so we can easily compare them.
+func (a etcdObjectVersioner) CompareResourceVersion(lhs, rhs runtime.Object) int {
+	lhsVersion, err := a.ObjectResourceVersion(lhs)
+	if err != nil {
+		// coder error
+		panic(err)
+	}
+	rhsVersion, err := a.ObjectResourceVersion(rhs)
+	if err != nil {
+		// coder error
+		panic(err)
+	}
+
+	if lhsVersion == rhsVersion {
+		return 0
+	}
+	if lhsVersion < rhsVersion {
+		return -1
+	}
+
+	return 1
+}
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/mutation_detector.go b/metrics-server/vendor/k8s.io/client-go/tools/cache/mutation_detector.go
new file mode 100644
index 0000000..e2aa448
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/mutation_detector.go
@@ -0,0 +1,130 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"fmt"
+	"os"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/golang/glog"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/diff"
+)
+
+var mutationDetectionEnabled = false
+
+func init() {
+	mutationDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_CACHE_MUTATION_DETECTOR"))
+}
+
+type CacheMutationDetector interface {
+	AddObject(obj interface{})
+	Run(stopCh <-chan struct{})
+}
+
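+// NewCacheMutationDetector returns a CacheMutationDetector for the named cache.
+// It is a no-op unless the KUBE_CACHE_MUTATION_DETECTOR environment variable
+// parses as true, in which case added objects are deep-copied and periodically
+// compared against their copies.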
+func NewCacheMutationDetector(name string) CacheMutationDetector {
+	if !mutationDetectionEnabled {
+		return dummyMutationDetector{}
+	}
+	glog.Warningln("Mutation detector is enabled, this will result in memory leakage.")
+	return &defaultCacheMutationDetector{name: name, period: 1 * time.Second}
+}
+
+type dummyMutationDetector struct{}
+
+func (dummyMutationDetector) Run(stopCh <-chan struct{}) {
+}
+func (dummyMutationDetector) AddObject(obj interface{}) {
+}
+
+// defaultCacheMutationDetector gives a way to detect if a cached object has been mutated.
+// It has a list of cached objects and their copies.  I haven't thought of a way
+// to see WHO is mutating it, just that it's getting mutated.
+type defaultCacheMutationDetector struct {
+	name   string
+	period time.Duration
+
+	lock       sync.Mutex
+	cachedObjs []cacheObj
+
+	// failureFunc is injectable for unit testing.  If you don't have it, the process will panic.
+	// This panic is intentional, since turning on this detection indicates you want a strong
+	// failure signal.  This failure is effectively a p0 bug and you can't trust process results
+	// after a mutation anyway.
+	failureFunc func(message string)
+}
+
+// cacheObj holds the actual object and a copy
+type cacheObj struct {
+	cached interface{}
+	copied interface{}
+}
+
+func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) {
+	// we DON'T want protection from panics.  If we're running this code, we want to die
+	for {
+		d.CompareObjects()
+
+		select {
+		case <-stopCh:
+			return
+		case <-time.After(d.period):
+		}
+	}
+}
+
+// AddObject makes a deep copy of the object for later comparison.  It only works on runtime.Object,
+// but that covers the vast majority of our cached objects.
+func (d *defaultCacheMutationDetector) AddObject(obj interface{}) {
+	if _, ok := obj.(DeletedFinalStateUnknown); ok {
+		return
+	}
+	if obj, ok := obj.(runtime.Object); ok {
+		copiedObj := obj.DeepCopyObject()
+
+		d.lock.Lock()
+		defer d.lock.Unlock()
+		d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj})
+	}
+}
+
+func (d *defaultCacheMutationDetector) CompareObjects() {
+	d.lock.Lock()
+	defer d.lock.Unlock()
+
+	altered := false
+	for i, obj := range d.cachedObjs {
+		if !reflect.DeepEqual(obj.cached, obj.copied) {
+			fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectDiff(obj.cached, obj.copied))
+			altered = true
+		}
+	}
+
+	if altered {
+		msg := fmt.Sprintf("cache %s modified", d.name)
+		if d.failureFunc != nil {
+			d.failureFunc(msg)
+			return
+		}
+		panic(msg)
+	}
+}
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/reflector.go b/metrics-server/vendor/k8s.io/client-go/tools/cache/reflector.go
new file mode 100644
index 0000000..054a737
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/reflector.go
@@ -0,0 +1,449 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math/rand"
+	"net"
+	"net/url"
+	"reflect"
+	"regexp"
+	goruntime "runtime"
+	"runtime/debug"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"syscall"
+	"time"
+
+	"github.com/golang/glog"
+	apierrs "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/clock"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/watch"
+)
+
+// Reflector watches a specified resource and causes all changes to be reflected in the given store.
+type Reflector struct {
+	// name identifies this reflector. By default it will be a file:line if possible.
+	name string
+	// metrics tracks basic metric information about the reflector
+	metrics *reflectorMetrics
+
+	// The type of object we expect to place in the store.
+	expectedType reflect.Type
+	// The destination to sync up with the watch source
+	store Store
+	// listerWatcher is used to perform lists and watches.
+	listerWatcher ListerWatcher
+	// period controls timing between one watch ending and
+	// the beginning of the next one.
+	period       time.Duration
+	resyncPeriod time.Duration
+	ShouldResync func() bool
+	// clock allows tests to manipulate time
+	clock clock.Clock
+	// lastSyncResourceVersion is the resource version token last
+	// observed when doing a sync with the underlying store
+	// it is thread safe, but not synchronized with the underlying store
+	lastSyncResourceVersion string
+	// lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
+	lastSyncResourceVersionMutex sync.RWMutex
+}
+
+var (
+	// We try to spread the load on apiserver by setting timeouts for
+	// watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout].
+	// However, it can be modified to keep a periodic resync from breaking the
+	// TCP connection.
+	minWatchTimeout = 5 * time.Minute
+)
+
+// NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector
+// The indexer is configured to key on namespace
+func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) {
+	indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{"namespace": MetaNamespaceIndexFunc})
+	reflector = NewReflector(lw, expectedType, indexer, resyncPeriod)
+	return indexer, reflector
+}
+
+// NewReflector creates a new Reflector object which will keep the given store up to
+// date with the server's contents for the given resource. Reflector promises to
+// only put things in the store that have the type of expectedType, unless expectedType
+// is nil. If resyncPeriod is non-zero, then lists will be executed after every
+// resyncPeriod, so that you can use reflectors to periodically process everything as
+// well as incrementally processing the things that change.
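+//
+// For example (an illustrative sketch; "lw" is a ListerWatcher for pods, "stopCh"
+// is a stop channel, and v1 is k8s.io/api/core/v1):
+//
+//	store := NewStore(MetaNamespaceKeyFunc)
+//	r := NewReflector(lw, &v1.Pod{}, store, 30*time.Second)
+//	go r.Run(stopCh)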
+func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
+	return NewNamedReflector(getDefaultReflectorName(internalPackages...), lw, expectedType, store, resyncPeriod)
+}
+
+// reflectorDisambiguator is used to disambiguate started reflectors.
+// It is initialized to an unstable value to ensure meaning isn't attributed to the suffix.
+var reflectorDisambiguator = int64(time.Now().UnixNano() % 12345)
+
+// NewNamedReflector is the same as NewReflector, but with a specified name for logging.
+func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
+	reflectorSuffix := atomic.AddInt64(&reflectorDisambiguator, 1)
+	r := &Reflector{
+		name: name,
+		// we need this to be unique per process (some names are still the same), but it should stay obvious who it belongs to
+		metrics:       newReflectorMetrics(makeValidPrometheusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))),
+		listerWatcher: lw,
+		store:         store,
+		expectedType:  reflect.TypeOf(expectedType),
+		period:        time.Second,
+		resyncPeriod:  resyncPeriod,
+		clock:         &clock.RealClock{},
+	}
+	return r
+}
+
+func makeValidPrometheusMetricLabel(in string) string {
+	// this isn't perfect, but it removes our common characters
+	return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in)
+}
+
+// internalPackages are packages that are ignored when creating a default reflector name. These packages are in the common
+// call chains to NewReflector, so they'd be low-entropy names for reflectors.
+var internalPackages = []string{"client-go/tools/cache/", "/runtime/asm_"}
+
+// getDefaultReflectorName walks back through the call stack until we find a caller from outside of the ignored packages.
+// It returns a shortpath/filename:line to aid in identification of this reflector when it starts logging.
+func getDefaultReflectorName(ignoredPackages ...string) string {
+	name := "????"
+	const maxStack = 10
+	for i := 1; i < maxStack; i++ {
+		_, file, line, ok := goruntime.Caller(i)
+		if !ok {
+			file, line, ok = extractStackCreator()
+			if !ok {
+				break
+			}
+			i += maxStack
+		}
+		if hasPackage(file, ignoredPackages) {
+			continue
+		}
+
+		file = trimPackagePrefix(file)
+		name = fmt.Sprintf("%s:%d", file, line)
+		break
+	}
+	return name
+}
+
+// hasPackage returns true if the file is in one of the ignored packages.
+func hasPackage(file string, ignoredPackages []string) bool {
+	for _, ignoredPackage := range ignoredPackages {
+		if strings.Contains(file, ignoredPackage) {
+			return true
+		}
+	}
+	return false
+}
+
+// trimPackagePrefix reduces duplicate values off the front of a package name.
+func trimPackagePrefix(file string) string {
+	if l := strings.LastIndex(file, "k8s.io/client-go/pkg/"); l >= 0 {
+		return file[l+len("k8s.io/client-go/"):]
+	}
+	if l := strings.LastIndex(file, "/src/"); l >= 0 {
+		return file[l+5:]
+	}
+	if l := strings.LastIndex(file, "/pkg/"); l >= 0 {
+		return file[l+1:]
+	}
+	return file
+}
+
+var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[[:xdigit:]]+$`)
+
+// extractStackCreator retrieves the goroutine file and line that launched this stack. Returns false
+// if the creator cannot be located.
+// TODO: Go does not expose this via runtime https://github.com/golang/go/issues/11440
+func extractStackCreator() (string, int, bool) {
+	stack := debug.Stack()
+	matches := stackCreator.FindStringSubmatch(string(stack))
+	if matches == nil || len(matches) != 4 {
+		return "", 0, false
+	}
+	line, err := strconv.Atoi(matches[3])
+	if err != nil {
+		return "", 0, false
+	}
+	return matches[2], line, true
+}
+
+// Run starts a watch and handles watch events. Will restart the watch if it is closed.
+// Run will exit when stopCh is closed.
+func (r *Reflector) Run(stopCh <-chan struct{}) {
+	glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
+	wait.Until(func() {
+		if err := r.ListAndWatch(stopCh); err != nil {
+			utilruntime.HandleError(err)
+		}
+	}, r.period, stopCh)
+}
+
+var (
+	// nothing will ever be sent down this channel
+	neverExitWatch <-chan time.Time = make(chan time.Time)
+
+	// Used to indicate that watching stopped so that a resync could happen.
+	errorResyncRequested = errors.New("resync channel fired")
+
+	// Used to indicate that watching stopped because of a signal from the stop
+	// channel passed in from a client of the reflector.
+	errorStopRequested = errors.New("Stop requested")
+)
+
+// resyncChan returns a channel which will receive something when a resync is
+// required, and a cleanup function.
+func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
+	if r.resyncPeriod == 0 {
+		return neverExitWatch, func() bool { return false }
+	}
+	// The cleanup function is required: imagine the scenario where watches
+	// always fail so we end up listing frequently. Then, if we don't
+	// manually stop the timer, we could end up with many timers active
+	// concurrently.
+	t := r.clock.NewTimer(r.resyncPeriod)
+	return t.C(), t.Stop
+}
+
+// ListAndWatch first lists all items and gets the resource version at the moment of call,
+// and then uses the resource version to watch.
+// It returns an error if ListAndWatch didn't even try to initialize the watch.
+func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
+	glog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name)
+	var resourceVersion string
+
+	// Explicitly set "0" as resource version - it's fine for the List()
+	// to be served from cache and potentially be delayed relative to
+	// etcd contents. Reflector framework will catch up via Watch() eventually.
+	options := metav1.ListOptions{ResourceVersion: "0"}
+	r.metrics.numberOfLists.Inc()
+	start := r.clock.Now()
+	list, err := r.listerWatcher.List(options)
+	if err != nil {
+		return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err)
+	}
+	r.metrics.listDuration.Observe(time.Since(start).Seconds())
+	listMetaInterface, err := meta.ListAccessor(list)
+	if err != nil {
+		return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err)
+	}
+	resourceVersion = listMetaInterface.GetResourceVersion()
+	items, err := meta.ExtractList(list)
+	if err != nil {
+		return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err)
+	}
+	r.metrics.numberOfItemsInList.Observe(float64(len(items)))
+	if err := r.syncWith(items, resourceVersion); err != nil {
+		return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err)
+	}
+	r.setLastSyncResourceVersion(resourceVersion)
+
+	resyncerrc := make(chan error, 1)
+	cancelCh := make(chan struct{})
+	defer close(cancelCh)
+	go func() {
+		resyncCh, cleanup := r.resyncChan()
+		defer func() {
+			cleanup() // Call the last one written into cleanup
+		}()
+		for {
+			select {
+			case <-resyncCh:
+			case <-stopCh:
+				return
+			case <-cancelCh:
+				return
+			}
+			if r.ShouldResync == nil || r.ShouldResync() {
+				glog.V(4).Infof("%s: forcing resync", r.name)
+				if err := r.store.Resync(); err != nil {
+					resyncerrc <- err
+					return
+				}
+			}
+			cleanup()
+			resyncCh, cleanup = r.resyncChan()
+		}
+	}()
+
+	for {
+		// give the stopCh a chance to stop the loop, even in case of continue statements further down on errors
+		select {
+		case <-stopCh:
+			return nil
+		default:
+		}
+
+		timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
+		options = metav1.ListOptions{
+			ResourceVersion: resourceVersion,
+			// We want to avoid situations of hanging watchers. Stop any watchers that do not
+			// receive any events within the timeout window.
+			TimeoutSeconds: &timeoutSeconds,
+		}
+
+		r.metrics.numberOfWatches.Inc()
+		w, err := r.listerWatcher.Watch(options)
+		if err != nil {
+			switch err {
+			case io.EOF:
+				// watch closed normally
+			case io.ErrUnexpectedEOF:
+				glog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err)
+			default:
+				utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err))
+			}
+			// If this is "connection refused" error, it means that most likely apiserver is not responsive.
+			// It doesn't make sense to re-list all objects because most likely we will be able to restart
+			// watch where we ended.
+			// If that's the case wait and resend watch request.
+			if urlError, ok := err.(*url.Error); ok {
+				if opError, ok := urlError.Err.(*net.OpError); ok {
+					if errno, ok := opError.Err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED {
+						time.Sleep(time.Second)
+						continue
+					}
+				}
+			}
+			return nil
+		}
+
+		if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil {
+			if err != errorStopRequested {
+				glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
+			}
+			return nil
+		}
+	}
+}
+
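+// Editorial note: a minimal sketch of how ListAndWatch is usually driven,
+// assuming a Reflector constructed elsewhere (e.g. via NewReflector, not
+// shown in this hunk), a caller-supplied ListerWatcher lw and Store, and a
+// pod object type; the retry loop uses wait.Until from
+// k8s.io/apimachinery/pkg/util/wait:
+//
+//	r := NewReflector(lw, &v1.Pod{}, store, 30*time.Second)
+//	wait.Until(func() {
+//		if err := r.ListAndWatch(stopCh); err != nil {
+//			utilruntime.HandleError(err)
+//		}
+//	}, time.Second, stopCh)
+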
+// syncWith replaces the store's items with the given list.
+func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error {
+	found := make([]interface{}, 0, len(items))
+	for _, item := range items {
+		found = append(found, item)
+	}
+	return r.store.Replace(found, resourceVersion)
+}
+
+// watchHandler watches w and keeps *resourceVersion up to date.
+func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error {
+	start := r.clock.Now()
+	eventCount := 0
+
+	// Stopping the watcher should be idempotent and if we return from this function there's no way
+	// we're coming back in with the same watch interface.
+	defer w.Stop()
+	// update metrics
+	defer func() {
+		r.metrics.numberOfItemsInWatch.Observe(float64(eventCount))
+		r.metrics.watchDuration.Observe(time.Since(start).Seconds())
+	}()
+
+loop:
+	for {
+		select {
+		case <-stopCh:
+			return errorStopRequested
+		case err := <-errc:
+			return err
+		case event, ok := <-w.ResultChan():
+			if !ok {
+				break loop
+			}
+			if event.Type == watch.Error {
+				return apierrs.FromObject(event.Object)
+			}
+			if e, a := r.expectedType, reflect.TypeOf(event.Object); e != nil && e != a {
+				utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a))
+				continue
+			}
+			meta, err := meta.Accessor(event.Object)
+			if err != nil {
+				utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
+				continue
+			}
+			newResourceVersion := meta.GetResourceVersion()
+			switch event.Type {
+			case watch.Added:
+				err := r.store.Add(event.Object)
+				if err != nil {
+					utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", r.name, event.Object, err))
+				}
+			case watch.Modified:
+				err := r.store.Update(event.Object)
+				if err != nil {
+					utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", r.name, event.Object, err))
+				}
+			case watch.Deleted:
+				// TODO: Will any consumers need access to the "last known
+				// state", which is passed in event.Object? If so, may need
+				// to change this.
+				err := r.store.Delete(event.Object)
+				if err != nil {
+					utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err))
+				}
+			default:
+				utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
+			}
+			*resourceVersion = newResourceVersion
+			r.setLastSyncResourceVersion(newResourceVersion)
+			eventCount++
+		}
+	}
+
+	watchDuration := r.clock.Now().Sub(start)
+	if watchDuration < 1*time.Second && eventCount == 0 {
+		r.metrics.numberOfShortWatches.Inc()
+		return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
+	}
+	glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
+	return nil
+}
+
+// LastSyncResourceVersion is the resource version observed when the reflector last synced
+// with the underlying store. The value returned is not synchronized with access to the
+// underlying store and is not thread-safe.
+func (r *Reflector) LastSyncResourceVersion() string {
+	r.lastSyncResourceVersionMutex.RLock()
+	defer r.lastSyncResourceVersionMutex.RUnlock()
+	return r.lastSyncResourceVersion
+}
+
+func (r *Reflector) setLastSyncResourceVersion(v string) {
+	r.lastSyncResourceVersionMutex.Lock()
+	defer r.lastSyncResourceVersionMutex.Unlock()
+	r.lastSyncResourceVersion = v
+
+	rv, err := strconv.Atoi(v)
+	if err == nil {
+		r.metrics.lastResourceVersion.Set(float64(rv))
+	}
+}
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go b/metrics-server/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go
new file mode 100644
index 0000000..0945e5c
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file provides abstractions for setting the provider (e.g., prometheus)
+// of metrics.
+
+package cache
+
+import (
+	"sync"
+)
+
+// GaugeMetric represents a single numerical value that can arbitrarily go up
+// and down.
+type GaugeMetric interface {
+	Set(float64)
+}
+
+// CounterMetric represents a single numerical value that only ever
+// goes up.
+type CounterMetric interface {
+	Inc()
+}
+
+// SummaryMetric captures individual observations.
+type SummaryMetric interface {
+	Observe(float64)
+}
+
+type noopMetric struct{}
+
+func (noopMetric) Inc()            {}
+func (noopMetric) Dec()            {}
+func (noopMetric) Observe(float64) {}
+func (noopMetric) Set(float64)     {}
+
+type reflectorMetrics struct {
+	numberOfLists       CounterMetric
+	listDuration        SummaryMetric
+	numberOfItemsInList SummaryMetric
+
+	numberOfWatches      CounterMetric
+	numberOfShortWatches CounterMetric
+	watchDuration        SummaryMetric
+	numberOfItemsInWatch SummaryMetric
+
+	lastResourceVersion GaugeMetric
+}
+
+// MetricsProvider generates various metrics used by the reflector.
+type MetricsProvider interface {
+	NewListsMetric(name string) CounterMetric
+	NewListDurationMetric(name string) SummaryMetric
+	NewItemsInListMetric(name string) SummaryMetric
+
+	NewWatchesMetric(name string) CounterMetric
+	NewShortWatchesMetric(name string) CounterMetric
+	NewWatchDurationMetric(name string) SummaryMetric
+	NewItemsInWatchMetric(name string) SummaryMetric
+
+	NewLastResourceVersionMetric(name string) GaugeMetric
+}
+
+type noopMetricsProvider struct{}
+
+func (noopMetricsProvider) NewListsMetric(name string) CounterMetric         { return noopMetric{} }
+func (noopMetricsProvider) NewListDurationMetric(name string) SummaryMetric  { return noopMetric{} }
+func (noopMetricsProvider) NewItemsInListMetric(name string) SummaryMetric   { return noopMetric{} }
+func (noopMetricsProvider) NewWatchesMetric(name string) CounterMetric       { return noopMetric{} }
+func (noopMetricsProvider) NewShortWatchesMetric(name string) CounterMetric  { return noopMetric{} }
+func (noopMetricsProvider) NewWatchDurationMetric(name string) SummaryMetric { return noopMetric{} }
+func (noopMetricsProvider) NewItemsInWatchMetric(name string) SummaryMetric  { return noopMetric{} }
+func (noopMetricsProvider) NewLastResourceVersionMetric(name string) GaugeMetric {
+	return noopMetric{}
+}
+
+var metricsFactory = struct {
+	metricsProvider MetricsProvider
+	setProviders    sync.Once
+}{
+	metricsProvider: noopMetricsProvider{},
+}
+
+func newReflectorMetrics(name string) *reflectorMetrics {
+	var ret *reflectorMetrics
+	if len(name) == 0 {
+		return ret
+	}
+	return &reflectorMetrics{
+		numberOfLists:        metricsFactory.metricsProvider.NewListsMetric(name),
+		listDuration:         metricsFactory.metricsProvider.NewListDurationMetric(name),
+		numberOfItemsInList:  metricsFactory.metricsProvider.NewItemsInListMetric(name),
+		numberOfWatches:      metricsFactory.metricsProvider.NewWatchesMetric(name),
+		numberOfShortWatches: metricsFactory.metricsProvider.NewShortWatchesMetric(name),
+		watchDuration:        metricsFactory.metricsProvider.NewWatchDurationMetric(name),
+		numberOfItemsInWatch: metricsFactory.metricsProvider.NewItemsInWatchMetric(name),
+		lastResourceVersion:  metricsFactory.metricsProvider.NewLastResourceVersionMetric(name),
+	}
+}
+
+// SetReflectorMetricsProvider sets the metrics provider
+func SetReflectorMetricsProvider(metricsProvider MetricsProvider) {
+	metricsFactory.setProviders.Do(func() {
+		metricsFactory.metricsProvider = metricsProvider
+	})
+}
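+
+// Editorial note: a hypothetical sketch of plugging in a real provider; the
+// promProvider type is illustrative only and not part of this package. Any
+// type implementing MetricsProvider works, and only the first call wins
+// because the provider is guarded by a sync.Once:
+//
+//	type promProvider struct{ /* e.g. a prometheus registry */ }
+//
+//	func (promProvider) NewListsMetric(name string) CounterMetric { /* ... */ return noopMetric{} }
+//	// ...implement the remaining MetricsProvider methods the same way...
+//
+//	func init() {
+//		SetReflectorMetricsProvider(promProvider{})
+//	}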
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/metrics-server/vendor/k8s.io/client-go/tools/cache/shared_informer.go
new file mode 100644
index 0000000..5f8c507
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/shared_informer.go
@@ -0,0 +1,597 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/clock"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/util/buffer"
+	"k8s.io/client-go/util/retry"
+
+	"github.com/golang/glog"
+)
+
+// SharedInformer has a shared data cache and is capable of distributing notifications for changes
+// to the cache to multiple listeners who registered via AddEventHandler. If you use this, there is
+// one behavior change compared to a standard Informer.  When you receive a notification, the cache
+// will be AT LEAST as fresh as the notification, but it MAY be fresher.  You should NOT depend
+// on the contents of the cache exactly matching the notification you've received in handler
+// functions.  If there was a create, followed by a delete, the cache may NOT have your item.  This
+// has advantages over the broadcaster since it allows us to share a common cache across many
+// controllers. Extending the broadcaster would have required us to keep duplicate caches for each
+// watch.
+type SharedInformer interface {
+	// AddEventHandler adds an event handler to the shared informer using the shared informer's resync
+	// period.  Events to a single handler are delivered sequentially, but there is no coordination
+	// between different handlers.
+	AddEventHandler(handler ResourceEventHandler)
+	// AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the
+	// specified resync period.  Events to a single handler are delivered sequentially, but there is
+	// no coordination between different handlers.
+	AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration)
+	// GetStore returns the Store.
+	GetStore() Store
+	// GetController gives back a synthetic interface that "votes" to start the informer
+	GetController() Controller
+	// Run starts the shared informer, which will be stopped when stopCh is closed.
+	Run(stopCh <-chan struct{})
+	// HasSynced returns true if the shared informer's store has synced.
+	HasSynced() bool
+	// LastSyncResourceVersion is the resource version observed when last synced with the underlying
+	// store. The value returned is not synchronized with access to the underlying store and is not
+	// thread-safe.
+	LastSyncResourceVersion() string
+}
+
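+// Editorial note: a minimal usage sketch (not part of the upstream source),
+// assuming a caller-supplied ListerWatcher lw, a pod object type, and the
+// ResourceEventHandlerFuncs adapter defined elsewhere in this package:
+//
+//	informer := NewSharedInformer(lw, &v1.Pod{}, 30*time.Second)
+//	informer.AddEventHandler(ResourceEventHandlerFuncs{
+//		AddFunc:    func(obj interface{}) { /* react to adds */ },
+//		UpdateFunc: func(oldObj, newObj interface{}) { /* react to updates */ },
+//		DeleteFunc: func(obj interface{}) { /* react to deletes */ },
+//	})
+//	go informer.Run(stopCh)
+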
+type SharedIndexInformer interface {
+	SharedInformer
+	// AddIndexers add indexers to the informer before it starts.
+	AddIndexers(indexers Indexers) error
+	GetIndexer() Indexer
+}
+
+// NewSharedInformer creates a new instance for the listwatcher.
+func NewSharedInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
+	return NewSharedIndexInformer(lw, objType, resyncPeriod, Indexers{})
+}
+
+// NewSharedIndexInformer creates a new instance for the listwatcher.
+func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
+	realClock := &clock.RealClock{}
+	sharedIndexInformer := &sharedIndexInformer{
+		processor:                       &sharedProcessor{clock: realClock},
+		indexer:                         NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
+		listerWatcher:                   lw,
+		objectType:                      objType,
+		resyncCheckPeriod:               defaultEventHandlerResyncPeriod,
+		defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod,
+		cacheMutationDetector:           NewCacheMutationDetector(fmt.Sprintf("%T", objType)),
+		clock: realClock,
+	}
+	return sharedIndexInformer
+}
+
+// InformerSynced is a function that can be used to determine if an informer has synced.  This is useful for determining if caches have synced.
+type InformerSynced func() bool
+
+const (
+	// syncedPollPeriod controls how often you look at the status of your sync funcs
+	syncedPollPeriod = 100 * time.Millisecond
+
+	// initialBufferSize is the initial number of event notifications that can be buffered.
+	initialBufferSize = 1024
+)
+
+// WaitForCacheSync waits for caches to populate.  It returns true if it was successful, false
+// if the controller should shut down.
+func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
+	err := wait.PollUntil(syncedPollPeriod,
+		func() (bool, error) {
+			for _, syncFunc := range cacheSyncs {
+				if !syncFunc() {
+					return false, nil
+				}
+			}
+			return true, nil
+		},
+		stopCh)
+	if err != nil {
+		glog.V(2).Infof("stop requested")
+		return false
+	}
+
+	glog.V(4).Infof("caches populated")
+	return true
+}
+
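+// Editorial note: a minimal sketch of the usual start-up sequence (not part
+// of the upstream source), assuming an informer created as in the sketch
+// above:
+//
+//	go informer.Run(stopCh)
+//	if !WaitForCacheSync(stopCh, informer.HasSynced) {
+//		return // stop was requested before the cache populated
+//	}
+//	// safe to read informer.GetStore() from here on
+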
+type sharedIndexInformer struct {
+	indexer    Indexer
+	controller Controller
+
+	processor             *sharedProcessor
+	cacheMutationDetector CacheMutationDetector
+
+	// This block is tracked to handle late initialization of the controller
+	listerWatcher ListerWatcher
+	objectType    runtime.Object
+
+	// resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call
+	// shouldResync to check if any of our listeners need a resync.
+	resyncCheckPeriod time.Duration
+	// defaultEventHandlerResyncPeriod is the default resync period for any handlers added via
+	// AddEventHandler (i.e. they don't specify one and just want to use the shared informer's default
+	// value).
+	defaultEventHandlerResyncPeriod time.Duration
+	// clock allows for testability
+	clock clock.Clock
+
+	started, stopped bool
+	startedLock      sync.Mutex
+
+	// blockDeltas gives a way to stop all event distribution so that a late event handler
+	// can safely join the shared informer.
+	blockDeltas sync.Mutex
+}
+
+// dummyController hides the fact that a SharedInformer is different from a dedicated one
+// where a caller can `Run`.  The run method is disconnected in this case, because higher
+// level logic will decide when to start the SharedInformer and related controller.
+// Because returning information back is always asynchronous, the legacy callers shouldn't
+// notice any change in behavior.
+type dummyController struct {
+	informer *sharedIndexInformer
+}
+
+func (v *dummyController) Run(stopCh <-chan struct{}) {
+}
+
+func (v *dummyController) HasSynced() bool {
+	return v.informer.HasSynced()
+}
+
+func (v *dummyController) LastSyncResourceVersion() string {
+	return ""
+}
+
+type updateNotification struct {
+	oldObj interface{}
+	newObj interface{}
+}
+
+type addNotification struct {
+	newObj interface{}
+}
+
+type deleteNotification struct {
+	oldObj interface{}
+}
+
+func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
+	defer utilruntime.HandleCrash()
+
+	fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, s.indexer)
+
+	cfg := &Config{
+		Queue:            fifo,
+		ListerWatcher:    s.listerWatcher,
+		ObjectType:       s.objectType,
+		FullResyncPeriod: s.resyncCheckPeriod,
+		RetryOnError:     false,
+		ShouldResync:     s.processor.shouldResync,
+
+		Process: s.HandleDeltas,
+	}
+
+	func() {
+		s.startedLock.Lock()
+		defer s.startedLock.Unlock()
+
+		s.controller = New(cfg)
+		s.controller.(*controller).clock = s.clock
+		s.started = true
+	}()
+
+	// Separate stop channel because Processor should be stopped strictly after controller
+	processorStopCh := make(chan struct{})
+	var wg wait.Group
+	defer wg.Wait()              // Wait for Processor to stop
+	defer close(processorStopCh) // Tell Processor to stop
+	wg.StartWithChannel(processorStopCh, s.cacheMutationDetector.Run)
+	wg.StartWithChannel(processorStopCh, s.processor.run)
+
+	defer func() {
+		s.startedLock.Lock()
+		defer s.startedLock.Unlock()
+		s.stopped = true // Don't want any new listeners
+	}()
+	s.controller.Run(stopCh)
+}
+
+func (s *sharedIndexInformer) HasSynced() bool {
+	s.startedLock.Lock()
+	defer s.startedLock.Unlock()
+
+	if s.controller == nil {
+		return false
+	}
+	return s.controller.HasSynced()
+}
+
+func (s *sharedIndexInformer) LastSyncResourceVersion() string {
+	s.startedLock.Lock()
+	defer s.startedLock.Unlock()
+
+	if s.controller == nil {
+		return ""
+	}
+	return s.controller.LastSyncResourceVersion()
+}
+
+func (s *sharedIndexInformer) GetStore() Store {
+	return s.indexer
+}
+
+func (s *sharedIndexInformer) GetIndexer() Indexer {
+	return s.indexer
+}
+
+func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error {
+	s.startedLock.Lock()
+	defer s.startedLock.Unlock()
+
+	if s.started {
+		return fmt.Errorf("informer has already started")
+	}
+
+	return s.indexer.AddIndexers(indexers)
+}
+
+func (s *sharedIndexInformer) GetController() Controller {
+	return &dummyController{informer: s}
+}
+
+func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) {
+	s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod)
+}
+
+func determineResyncPeriod(desired, check time.Duration) time.Duration {
+	if desired == 0 {
+		return desired
+	}
+	if check == 0 {
+		glog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired)
+		return 0
+	}
+	if desired < check {
+		glog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check)
+		return check
+	}
+	return desired
+}
+
+const minimumResyncPeriod = 1 * time.Second
+
+func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) {
+	s.startedLock.Lock()
+	defer s.startedLock.Unlock()
+
+	if s.stopped {
+		glog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler)
+		return
+	}
+
+	if resyncPeriod > 0 {
+		if resyncPeriod < minimumResyncPeriod {
+			glog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod)
+			resyncPeriod = minimumResyncPeriod
+		}
+
+		if resyncPeriod < s.resyncCheckPeriod {
+			if s.started {
+				glog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
+				resyncPeriod = s.resyncCheckPeriod
+			} else {
+				// if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update
+				// resyncCheckPeriod to match resyncPeriod and adjust the resync periods of all the listeners
+				// accordingly
+				s.resyncCheckPeriod = resyncPeriod
+				s.processor.resyncCheckPeriodChanged(resyncPeriod)
+			}
+		}
+	}
+
+	listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize)
+
+	if !s.started {
+		s.processor.addListener(listener)
+		return
+	}
+
+	// in order to safely join, we have to
+	// 1. stop sending add/update/delete notifications
+	// 2. do a list against the store
+	// 3. send synthetic "Add" events to the new handler
+	// 4. unblock
+	s.blockDeltas.Lock()
+	defer s.blockDeltas.Unlock()
+
+	s.processor.addListener(listener)
+	for _, item := range s.indexer.List() {
+		listener.add(addNotification{newObj: item})
+	}
+}
+
+func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
+	s.blockDeltas.Lock()
+	defer s.blockDeltas.Unlock()
+
+	// from oldest to newest
+	for _, d := range obj.(Deltas) {
+		switch d.Type {
+		case Sync, Added, Updated:
+			isSync := d.Type == Sync
+			s.cacheMutationDetector.AddObject(d.Object)
+			if old, exists, err := s.indexer.Get(d.Object); err == nil && exists {
+				if err := s.indexer.Update(d.Object); err != nil {
+					return err
+				}
+				s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}, isSync)
+			} else {
+				if err := s.indexer.Add(d.Object); err != nil {
+					return err
+				}
+				s.processor.distribute(addNotification{newObj: d.Object}, isSync)
+			}
+		case Deleted:
+			if err := s.indexer.Delete(d.Object); err != nil {
+				return err
+			}
+			s.processor.distribute(deleteNotification{oldObj: d.Object}, false)
+		}
+	}
+	return nil
+}
+
+type sharedProcessor struct {
+	listenersStarted bool
+	listenersLock    sync.RWMutex
+	listeners        []*processorListener
+	syncingListeners []*processorListener
+	clock            clock.Clock
+	wg               wait.Group
+}
+
+func (p *sharedProcessor) addListener(listener *processorListener) {
+	p.listenersLock.Lock()
+	defer p.listenersLock.Unlock()
+
+	p.addListenerLocked(listener)
+	if p.listenersStarted {
+		p.wg.Start(listener.run)
+		p.wg.Start(listener.pop)
+	}
+}
+
+func (p *sharedProcessor) addListenerLocked(listener *processorListener) {
+	p.listeners = append(p.listeners, listener)
+	p.syncingListeners = append(p.syncingListeners, listener)
+}
+
+func (p *sharedProcessor) distribute(obj interface{}, sync bool) {
+	p.listenersLock.RLock()
+	defer p.listenersLock.RUnlock()
+
+	if sync {
+		for _, listener := range p.syncingListeners {
+			listener.add(obj)
+		}
+	} else {
+		for _, listener := range p.listeners {
+			listener.add(obj)
+		}
+	}
+}
+
+func (p *sharedProcessor) run(stopCh <-chan struct{}) {
+	func() {
+		p.listenersLock.RLock()
+		defer p.listenersLock.RUnlock()
+		for _, listener := range p.listeners {
+			p.wg.Start(listener.run)
+			p.wg.Start(listener.pop)
+		}
+		p.listenersStarted = true
+	}()
+	<-stopCh
+	p.listenersLock.RLock()
+	defer p.listenersLock.RUnlock()
+	for _, listener := range p.listeners {
+		close(listener.addCh) // Tell .pop() to stop. .pop() will tell .run() to stop
+	}
+	p.wg.Wait() // Wait for all .pop() and .run() to stop
+}
+
+// shouldResync queries every listener to determine if any of them need a resync, based on each
+// listener's resyncPeriod.
+func (p *sharedProcessor) shouldResync() bool {
+	p.listenersLock.Lock()
+	defer p.listenersLock.Unlock()
+
+	p.syncingListeners = []*processorListener{}
+
+	resyncNeeded := false
+	now := p.clock.Now()
+	for _, listener := range p.listeners {
+		// need to loop through all the listeners to see if they need to resync so we can prepare any
+		// listeners that are going to be resyncing.
+		if listener.shouldResync(now) {
+			resyncNeeded = true
+			p.syncingListeners = append(p.syncingListeners, listener)
+			listener.determineNextResync(now)
+		}
+	}
+	return resyncNeeded
+}
+
+func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Duration) {
+	p.listenersLock.RLock()
+	defer p.listenersLock.RUnlock()
+
+	for _, listener := range p.listeners {
+		resyncPeriod := determineResyncPeriod(listener.requestedResyncPeriod, resyncCheckPeriod)
+		listener.setResyncPeriod(resyncPeriod)
+	}
+}
+
+type processorListener struct {
+	nextCh chan interface{}
+	addCh  chan interface{}
+
+	handler ResourceEventHandler
+
+	// pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed.
+	// There is one per listener, but a failing/stalled listener will have infinite pendingNotifications
+	// added until we OOM.
+	// TODO: This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but
+	// we should try to do something better.
+	pendingNotifications buffer.RingGrowing
+
+	// requestedResyncPeriod is how frequently the listener wants a full resync from the shared informer
+	requestedResyncPeriod time.Duration
+	// resyncPeriod is how frequently the listener wants a full resync from the shared informer. This
+	// value may differ from requestedResyncPeriod if the shared informer adjusts it to align with the
+	// informer's overall resync check period.
+	resyncPeriod time.Duration
+	// nextResync is the earliest time the listener should get a full resync
+	nextResync time.Time
+	// resyncLock guards access to resyncPeriod and nextResync
+	resyncLock sync.Mutex
+}
+
+func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener {
+	ret := &processorListener{
+		nextCh:                make(chan interface{}),
+		addCh:                 make(chan interface{}),
+		handler:               handler,
+		pendingNotifications:  *buffer.NewRingGrowing(bufferSize),
+		requestedResyncPeriod: requestedResyncPeriod,
+		resyncPeriod:          resyncPeriod,
+	}
+
+	ret.determineNextResync(now)
+
+	return ret
+}
+
+func (p *processorListener) add(notification interface{}) {
+	p.addCh <- notification
+}
+
+func (p *processorListener) pop() {
+	defer utilruntime.HandleCrash()
+	defer close(p.nextCh) // Tell .run() to stop
+
+	var nextCh chan<- interface{}
+	var notification interface{}
+	for {
+		select {
+		case nextCh <- notification:
+			// Notification dispatched
+			var ok bool
+			notification, ok = p.pendingNotifications.ReadOne()
+			if !ok { // Nothing to pop
+				nextCh = nil // Disable this select case
+			}
+		case notificationToAdd, ok := <-p.addCh:
+			if !ok {
+				return
+			}
+			if notification == nil { // No notification to pop (and pendingNotifications is empty)
+				// Optimize the case - skip adding to pendingNotifications
+				notification = notificationToAdd
+				nextCh = p.nextCh
+			} else { // There is already a notification waiting to be dispatched
+				p.pendingNotifications.WriteOne(notificationToAdd)
+			}
+		}
+	}
+}
+
+func (p *processorListener) run() {
+	// this call blocks until the channel is closed.  When a panic happens during the notification
+	// we will catch it, **the offending item will be skipped!**, and after a short delay (one second)
+	// the next notification will be attempted.  This is usually better than the alternative of never
+	// delivering again.
+	stopCh := make(chan struct{})
+	wait.Until(func() {
+		// this gives us a few quick retries before a long pause and then a few more quick retries
+		err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
+			for next := range p.nextCh {
+				switch notification := next.(type) {
+				case updateNotification:
+					p.handler.OnUpdate(notification.oldObj, notification.newObj)
+				case addNotification:
+					p.handler.OnAdd(notification.newObj)
+				case deleteNotification:
+					p.handler.OnDelete(notification.oldObj)
+				default:
+					utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next))
+				}
+			}
+			// the only way to get here is if the p.nextCh is empty and closed
+			return true, nil
+		})
+
+		// the only way to get here is if the p.nextCh is empty and closed
+		if err == nil {
+			close(stopCh)
+		}
+	}, 1*time.Minute, stopCh)
+}
+
+// shouldResync determines if the listener needs a resync. If the listener's resyncPeriod is 0,
+// this always returns false.
+func (p *processorListener) shouldResync(now time.Time) bool {
+	p.resyncLock.Lock()
+	defer p.resyncLock.Unlock()
+
+	if p.resyncPeriod == 0 {
+		return false
+	}
+
+	return now.After(p.nextResync) || now.Equal(p.nextResync)
+}
+
+func (p *processorListener) determineNextResync(now time.Time) {
+	p.resyncLock.Lock()
+	defer p.resyncLock.Unlock()
+
+	p.nextResync = now.Add(p.resyncPeriod)
+}
+
+func (p *processorListener) setResyncPeriod(resyncPeriod time.Duration) {
+	p.resyncLock.Lock()
+	defer p.resyncLock.Unlock()
+
+	p.resyncPeriod = resyncPeriod
+}
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/store.go b/metrics-server/vendor/k8s.io/client-go/tools/cache/store.go
new file mode 100755
index 0000000..4958987
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/store.go
@@ -0,0 +1,244 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+)
+
+// Store is a generic object storage interface. Reflector knows how to watch a server
+// and update a store. A generic store is provided, which allows Reflector to be used
+// as a local caching system, and an LRU store, which allows Reflector to work like a
+// queue of items yet to be processed.
+//
+// Store makes no assumptions about stored object identity; it is the responsibility
+// of a Store implementation to provide a mechanism to correctly key objects and to
+// define the contract for obtaining objects by some arbitrary key type.
+type Store interface {
+	Add(obj interface{}) error
+	Update(obj interface{}) error
+	Delete(obj interface{}) error
+	List() []interface{}
+	ListKeys() []string
+	Get(obj interface{}) (item interface{}, exists bool, err error)
+	GetByKey(key string) (item interface{}, exists bool, err error)
+
+	// Replace will delete the contents of the store, using instead the
+	// given list. Store takes ownership of the list; you should not reference
+	// it after calling this function.
+	Replace([]interface{}, string) error
+	Resync() error
+}
+
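+// Editorial note: a minimal usage sketch (not part of the upstream source),
+// assuming pod is an API object with its ObjectMeta populated:
+//
+//	store := NewStore(MetaNamespaceKeyFunc)
+//	_ = store.Add(pod) // keyed as "<namespace>/<name>"
+//	obj, exists, _ := store.GetByKey("default/mypod")
+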
+// KeyFunc knows how to make a key from an object. Implementations should be deterministic.
+type KeyFunc func(obj interface{}) (string, error)
+
+// KeyError will be returned any time a KeyFunc gives an error; it includes the object
+// at fault.
+type KeyError struct {
+	Obj interface{}
+	Err error
+}
+
+// Error gives a human-readable description of the error.
+func (k KeyError) Error() string {
+	return fmt.Sprintf("couldn't create key for object %+v: %v", k.Obj, k.Err)
+}
+
+// ExplicitKey can be passed to MetaNamespaceKeyFunc if you have the key for
+// the object but not the object itself.
+type ExplicitKey string
+
+// MetaNamespaceKeyFunc is a convenient default KeyFunc which knows how to make
+// keys for API objects which implement meta.Interface.
+// The key uses the format <namespace>/<name> unless <namespace> is empty, then
+// it's just <name>.
+//
+// TODO: replace key-as-string with a key-as-struct so that this
+// packing/unpacking won't be necessary.
+func MetaNamespaceKeyFunc(obj interface{}) (string, error) {
+	if key, ok := obj.(ExplicitKey); ok {
+		return string(key), nil
+	}
+	meta, err := meta.Accessor(obj)
+	if err != nil {
+		return "", fmt.Errorf("object has no meta: %v", err)
+	}
+	if len(meta.GetNamespace()) > 0 {
+		return meta.GetNamespace() + "/" + meta.GetName(), nil
+	}
+	return meta.GetName(), nil
+}
+
+// SplitMetaNamespaceKey returns the namespace and name that
+// MetaNamespaceKeyFunc encoded into key.
+//
+// TODO: replace key-as-string with a key-as-struct so that this
+// packing/unpacking won't be necessary.
+func SplitMetaNamespaceKey(key string) (namespace, name string, err error) {
+	parts := strings.Split(key, "/")
+	switch len(parts) {
+	case 1:
+		// name only, no namespace
+		return "", parts[0], nil
+	case 2:
+		// namespace and name
+		return parts[0], parts[1], nil
+	}
+
+	return "", "", fmt.Errorf("unexpected key format: %q", key)
+}
+
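+// Editorial note: a small sketch of the key round-trip (not part of the
+// upstream source), assuming pod lives in namespace "default" with name
+// "mypod":
+//
+//	key, _ := MetaNamespaceKeyFunc(pod)       // "default/mypod"
+//	ns, name, _ := SplitMetaNamespaceKey(key) // "default", "mypod"
+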
+// cache responsibilities are limited to:
+//	1. Computing keys for objects via keyFunc
+//	2. Invoking methods of a ThreadSafeStore interface
+type cache struct {
+	// cacheStorage bears the burden of thread safety for the cache
+	cacheStorage ThreadSafeStore
+	// keyFunc is used to make the key for objects stored in and retrieved from items, and
+	// should be deterministic.
+	keyFunc KeyFunc
+}
+
+var _ Store = &cache{}
+
+// Add inserts an item into the cache.
+func (c *cache) Add(obj interface{}) error {
+	key, err := c.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	c.cacheStorage.Add(key, obj)
+	return nil
+}
+
+// Update sets an item in the cache to its updated state.
+func (c *cache) Update(obj interface{}) error {
+	key, err := c.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	c.cacheStorage.Update(key, obj)
+	return nil
+}
+
+// Delete removes an item from the cache.
+func (c *cache) Delete(obj interface{}) error {
+	key, err := c.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	c.cacheStorage.Delete(key)
+	return nil
+}
+
+// List returns a list of all the items.
+// List is completely threadsafe as long as you treat all items as immutable.
+func (c *cache) List() []interface{} {
+	return c.cacheStorage.List()
+}
+
+// ListKeys returns a list of all the keys of the objects currently
+// in the cache.
+func (c *cache) ListKeys() []string {
+	return c.cacheStorage.ListKeys()
+}
+
+// GetIndexers returns the indexers of cache
+func (c *cache) GetIndexers() Indexers {
+	return c.cacheStorage.GetIndexers()
+}
+
+// Index returns a list of items that match on the index function
+// Index is thread-safe so long as you treat all items as immutable
+func (c *cache) Index(indexName string, obj interface{}) ([]interface{}, error) {
+	return c.cacheStorage.Index(indexName, obj)
+}
+
+func (c *cache) IndexKeys(indexName, indexKey string) ([]string, error) {
+	return c.cacheStorage.IndexKeys(indexName, indexKey)
+}
+
+// ListIndexFuncValues returns the list of generated values of an Index func
+func (c *cache) ListIndexFuncValues(indexName string) []string {
+	return c.cacheStorage.ListIndexFuncValues(indexName)
+}
+
+func (c *cache) ByIndex(indexName, indexKey string) ([]interface{}, error) {
+	return c.cacheStorage.ByIndex(indexName, indexKey)
+}
+
+func (c *cache) AddIndexers(newIndexers Indexers) error {
+	return c.cacheStorage.AddIndexers(newIndexers)
+}
+
+// Get returns the requested item, or sets exists=false.
+// Get is completely threadsafe as long as you treat all items as immutable.
+func (c *cache) Get(obj interface{}) (item interface{}, exists bool, err error) {
+	key, err := c.keyFunc(obj)
+	if err != nil {
+		return nil, false, KeyError{obj, err}
+	}
+	return c.GetByKey(key)
+}
+
+// GetByKey returns the requested item, or exists=false.
+// GetByKey is completely threadsafe as long as you treat all items as immutable.
+func (c *cache) GetByKey(key string) (item interface{}, exists bool, err error) {
+	item, exists = c.cacheStorage.Get(key)
+	return item, exists, nil
+}
+
+// Replace will delete the contents of 'c', using instead the given list.
+// 'c' takes ownership of the list; you should not reference the list again
+// after calling this function.
+func (c *cache) Replace(list []interface{}, resourceVersion string) error {
+	items := map[string]interface{}{}
+	for _, item := range list {
+		key, err := c.keyFunc(item)
+		if err != nil {
+			return KeyError{item, err}
+		}
+		items[key] = item
+	}
+	c.cacheStorage.Replace(items, resourceVersion)
+	return nil
+}
+
+// Resync touches all items in the store to force processing
+func (c *cache) Resync() error {
+	return c.cacheStorage.Resync()
+}
+
+// NewStore returns a Store implemented simply with a map and a lock.
+func NewStore(keyFunc KeyFunc) Store {
+	return &cache{
+		cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}),
+		keyFunc:      keyFunc,
+	}
+}
+
+// NewIndexer returns an Indexer implemented simply with a map and a lock.
+func NewIndexer(keyFunc KeyFunc, indexers Indexers) Indexer {
+	return &cache{
+		cacheStorage: NewThreadSafeStore(indexers, Indices{}),
+		keyFunc:      keyFunc,
+	}
+}
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/metrics-server/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
new file mode 100644
index 0000000..1c201ef
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
@@ -0,0 +1,304 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"fmt"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// ThreadSafeStore is an interface that allows concurrent access to a storage backend.
+// TL;DR caveats: you must not modify anything returned by Get or List as it will break
+// the indexing feature in addition to not being thread safe.
+//
+// The guarantees of thread safety provided by List/Get are only valid if the caller
+// treats returned items as read-only. For example, a pointer inserted in the store
+// through `Add` will be returned as is by `Get`. Multiple clients might invoke `Get`
+// on the same key and modify the pointer in a non-thread-safe way. Also note that
+// modifying objects stored by the indexers (if any) will *not* automatically lead
+// to a re-index. So it's not a good idea to directly modify the objects returned by
+// Get/List, in general.
+type ThreadSafeStore interface {
+	Add(key string, obj interface{})
+	Update(key string, obj interface{})
+	Delete(key string)
+	Get(key string) (item interface{}, exists bool)
+	List() []interface{}
+	ListKeys() []string
+	Replace(map[string]interface{}, string)
+	Index(indexName string, obj interface{}) ([]interface{}, error)
+	IndexKeys(indexName, indexKey string) ([]string, error)
+	ListIndexFuncValues(name string) []string
+	ByIndex(indexName, indexKey string) ([]interface{}, error)
+	GetIndexers() Indexers
+
+	// AddIndexers adds more indexers to this store.  If you call this after you already have data
+	// in the store, the results are undefined.
+	AddIndexers(newIndexers Indexers) error
+	Resync() error
+}
+
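+// Editorial note: a minimal usage sketch (not part of the upstream source).
+// byApp is a hypothetical IndexFunc that extracts an "app" label value from
+// the stored object:
+//
+//	byApp := func(obj interface{}) ([]string, error) { /* return label values */ return nil, nil }
+//	store := NewThreadSafeStore(Indexers{"byApp": byApp}, Indices{})
+//	store.Add("default/mypod", pod)
+//	matches, _ := store.ByIndex("byApp", "web")
+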
+// threadSafeMap implements ThreadSafeStore
+type threadSafeMap struct {
+	lock  sync.RWMutex
+	items map[string]interface{}
+
+	// indexers maps a name to an IndexFunc
+	indexers Indexers
+	// indices maps a name to an Index
+	indices Indices
+}
+
+func (c *threadSafeMap) Add(key string, obj interface{}) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	oldObject := c.items[key]
+	c.items[key] = obj
+	c.updateIndices(oldObject, obj, key)
+}
+
+func (c *threadSafeMap) Update(key string, obj interface{}) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	oldObject := c.items[key]
+	c.items[key] = obj
+	c.updateIndices(oldObject, obj, key)
+}
+
+func (c *threadSafeMap) Delete(key string) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if obj, exists := c.items[key]; exists {
+		c.deleteFromIndices(obj, key)
+		delete(c.items, key)
+	}
+}
+
+func (c *threadSafeMap) Get(key string) (item interface{}, exists bool) {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+	item, exists = c.items[key]
+	return item, exists
+}
+
+func (c *threadSafeMap) List() []interface{} {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+	list := make([]interface{}, 0, len(c.items))
+	for _, item := range c.items {
+		list = append(list, item)
+	}
+	return list
+}
+
+// ListKeys returns a list of all the keys of the objects currently
+// in the threadSafeMap.
+func (c *threadSafeMap) ListKeys() []string {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+	list := make([]string, 0, len(c.items))
+	for key := range c.items {
+		list = append(list, key)
+	}
+	return list
+}
+
+func (c *threadSafeMap) Replace(items map[string]interface{}, resourceVersion string) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	c.items = items
+
+	// rebuild any index
+	c.indices = Indices{}
+	for key, item := range c.items {
+		c.updateIndices(nil, item, key)
+	}
+}
+
+// Index returns a list of items that match on the index function
+// Index is thread-safe so long as you treat all items as immutable
+func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+
+	indexFunc := c.indexers[indexName]
+	if indexFunc == nil {
+		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
+	}
+
+	indexKeys, err := indexFunc(obj)
+	if err != nil {
+		return nil, err
+	}
+	index := c.indices[indexName]
+
+	// need to de-dupe the return list.  Since multiple keys are allowed, this can happen.
+	returnKeySet := sets.String{}
+	for _, indexKey := range indexKeys {
+		set := index[indexKey]
+		for _, key := range set.UnsortedList() {
+			returnKeySet.Insert(key)
+		}
+	}
+
+	list := make([]interface{}, 0, returnKeySet.Len())
+	for absoluteKey := range returnKeySet {
+		list = append(list, c.items[absoluteKey])
+	}
+	return list, nil
+}
+
+// ByIndex returns a list of items that match an exact value on the index function
+func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, error) {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+
+	indexFunc := c.indexers[indexName]
+	if indexFunc == nil {
+		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
+	}
+
+	index := c.indices[indexName]
+
+	set := index[indexKey]
+	list := make([]interface{}, 0, set.Len())
+	for _, key := range set.List() {
+		list = append(list, c.items[key])
+	}
+
+	return list, nil
+}
+
+// IndexKeys returns a list of keys that match on the index function.
+// IndexKeys is thread-safe so long as you treat all items as immutable.
+func (c *threadSafeMap) IndexKeys(indexName, indexKey string) ([]string, error) {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+
+	indexFunc := c.indexers[indexName]
+	if indexFunc == nil {
+		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
+	}
+
+	index := c.indices[indexName]
+
+	set := index[indexKey]
+	return set.List(), nil
+}
+
+func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+
+	index := c.indices[indexName]
+	names := make([]string, 0, len(index))
+	for key := range index {
+		names = append(names, key)
+	}
+	return names
+}
+
+func (c *threadSafeMap) GetIndexers() Indexers {
+	return c.indexers
+}
+
+func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	if len(c.items) > 0 {
+		return fmt.Errorf("cannot add indexers to running index")
+	}
+
+	oldKeys := sets.StringKeySet(c.indexers)
+	newKeys := sets.StringKeySet(newIndexers)
+
+	if oldKeys.HasAny(newKeys.List()...) {
+		return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys))
+	}
+
+	for k, v := range newIndexers {
+		c.indexers[k] = v
+	}
+	return nil
+}
+
+// updateIndices modifies the object's location in the managed indexes; if this is an update,
+// you must provide an oldObj.
+// updateIndices must be called from a function that already has a lock on the cache.
+func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) {
+	// if we got an old object, we need to remove it before we add it again
+	if oldObj != nil {
+		c.deleteFromIndices(oldObj, key)
+	}
+	for name, indexFunc := range c.indexers {
+		indexValues, err := indexFunc(newObj)
+		if err != nil {
+			panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
+		}
+		index := c.indices[name]
+		if index == nil {
+			index = Index{}
+			c.indices[name] = index
+		}
+
+		for _, indexValue := range indexValues {
+			set := index[indexValue]
+			if set == nil {
+				set = sets.String{}
+				index[indexValue] = set
+			}
+			set.Insert(key)
+		}
+	}
+}
+
+// deleteFromIndices removes the object from each of the managed indexes.
+// It is intended to be called from a function that already has a lock on the cache.
+func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) {
+	for name, indexFunc := range c.indexers {
+		indexValues, err := indexFunc(obj)
+		if err != nil {
+			panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
+		}
+
+		index := c.indices[name]
+		if index == nil {
+			continue
+		}
+		for _, indexValue := range indexValues {
+			set := index[indexValue]
+			if set != nil {
+				set.Delete(key)
+			}
+		}
+	}
+}
+
+func (c *threadSafeMap) Resync() error {
+	// Nothing to do
+	return nil
+}
+
+func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore {
+	return &threadSafeMap{
+		items:    map[string]interface{}{},
+		indexers: indexers,
+		indices:  indices,
+	}
+}
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/cache/undelta_store.go b/metrics-server/vendor/k8s.io/client-go/tools/cache/undelta_store.go
new file mode 100644
index 0000000..117df46
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/cache/undelta_store.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+// UndeltaStore listens to incremental updates and sends complete state on every change.
+// It implements the Store interface so that it can receive a stream of mirrored objects
+// from Reflector.  Whenever it receives any complete (Store.Replace) or incremental change
+// (Store.Add, Store.Update, Store.Delete), it sends the complete state by calling PushFunc.
+// It is thread-safe.  It guarantees that every change (Add, Update, Replace, Delete) results
+// in one call to PushFunc, but sometimes PushFunc may be called twice with the same values.
+// PushFunc should be thread safe.
+type UndeltaStore struct {
+	Store
+	PushFunc func([]interface{})
+}
+
+// Assert that it implements the Store interface.
+var _ Store = &UndeltaStore{}
+
+// Note about thread safety.  The Store implementation (cache.cache) uses a lock for all methods.
+// In the functions below, the lock gets released and reacquired between the {Add,Delete,etc}
+// and the List.  So, the following can happen, resulting in two identical calls to PushFunc.
+// time            thread 1                  thread 2
+// 0               UndeltaStore.Add(a)
+// 1                                         UndeltaStore.Add(b)
+// 2               Store.Add(a)
+// 3                                         Store.Add(b)
+// 4               Store.List() -> [a,b]
+// 5                                         Store.List() -> [a,b]
+
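+// Editorial note: a minimal usage sketch (not part of the upstream source),
+// with a caller-supplied push function:
+//
+//	push := func(state []interface{}) { /* send the complete state downstream */ }
+//	store := NewUndeltaStore(push, MetaNamespaceKeyFunc)
+//	_ = store.Add(pod) // push receives the full, updated list
+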
+func (u *UndeltaStore) Add(obj interface{}) error {
+	if err := u.Store.Add(obj); err != nil {
+		return err
+	}
+	u.PushFunc(u.Store.List())
+	return nil
+}
+
+func (u *UndeltaStore) Update(obj interface{}) error {
+	if err := u.Store.Update(obj); err != nil {
+		return err
+	}
+	u.PushFunc(u.Store.List())
+	return nil
+}
+
+func (u *UndeltaStore) Delete(obj interface{}) error {
+	if err := u.Store.Delete(obj); err != nil {
+		return err
+	}
+	u.PushFunc(u.Store.List())
+	return nil
+}
+
+func (u *UndeltaStore) Replace(list []interface{}, resourceVersion string) error {
+	if err := u.Store.Replace(list, resourceVersion); err != nil {
+		return err
+	}
+	u.PushFunc(u.Store.List())
+	return nil
+}
+
+// NewUndeltaStore returns an UndeltaStore implemented with a Store.
+func NewUndeltaStore(pushFunc func([]interface{}), keyFunc KeyFunc) *UndeltaStore {
+	return &UndeltaStore{
+		Store:    NewStore(keyFunc),
+		PushFunc: pushFunc,
+	}
+}
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go
new file mode 100644
index 0000000..0a08187
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+package api
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
new file mode 100644
index 0000000..43e2648
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
@@ -0,0 +1,183 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+)
+
+func init() {
+	sDec, _ := base64.StdEncoding.DecodeString("REDACTED+")
+	redactedBytes = []byte(string(sDec))
+}
+
+// IsConfigEmpty returns true if the config is empty.
+func IsConfigEmpty(config *Config) bool {
+	return len(config.AuthInfos) == 0 && len(config.Clusters) == 0 && len(config.Contexts) == 0 &&
+		len(config.CurrentContext) == 0 &&
+		len(config.Preferences.Extensions) == 0 && !config.Preferences.Colors &&
+		len(config.Extensions) == 0
+}
+
+// MinifyConfig reads the current context and uses that to keep only the relevant pieces of config.
+// This is useful for making secrets based on kubeconfig files.
+func MinifyConfig(config *Config) error {
+	if len(config.CurrentContext) == 0 {
+		return errors.New("current-context must exist in order to minify")
+	}
+
+	currContext, exists := config.Contexts[config.CurrentContext]
+	if !exists {
+		return fmt.Errorf("cannot locate context %v", config.CurrentContext)
+	}
+
+	newContexts := map[string]*Context{}
+	newContexts[config.CurrentContext] = currContext
+
+	newClusters := map[string]*Cluster{}
+	if len(currContext.Cluster) > 0 {
+		if _, exists := config.Clusters[currContext.Cluster]; !exists {
+			return fmt.Errorf("cannot locate cluster %v", currContext.Cluster)
+		}
+
+		newClusters[currContext.Cluster] = config.Clusters[currContext.Cluster]
+	}
+
+	newAuthInfos := map[string]*AuthInfo{}
+	if len(currContext.AuthInfo) > 0 {
+		if _, exists := config.AuthInfos[currContext.AuthInfo]; !exists {
+			return fmt.Errorf("cannot locate user %v", currContext.AuthInfo)
+		}
+
+		newAuthInfos[currContext.AuthInfo] = config.AuthInfos[currContext.AuthInfo]
+	}
+
+	config.AuthInfos = newAuthInfos
+	config.Clusters = newClusters
+	config.Contexts = newContexts
+
+	return nil
+}
+
+var redactedBytes []byte
+
+// ShortenConfig redacts raw data entries from the config object for a human-readable view.
+func ShortenConfig(config *Config) {
+	// trick json encoder into printing a human readable string in the raw data
+	// by base64 decoding what we want to print. Relies on implementation of
+	// http://golang.org/pkg/encoding/json/#Marshal using base64 to encode []byte
+	for key, authInfo := range config.AuthInfos {
+		if len(authInfo.ClientKeyData) > 0 {
+			authInfo.ClientKeyData = redactedBytes
+		}
+		if len(authInfo.ClientCertificateData) > 0 {
+			authInfo.ClientCertificateData = redactedBytes
+		}
+		config.AuthInfos[key] = authInfo
+	}
+	for key, cluster := range config.Clusters {
+		if len(cluster.CertificateAuthorityData) > 0 {
+			cluster.CertificateAuthorityData = redactedBytes
+		}
+		config.Clusters[key] = cluster
+	}
+}
+
+// FlattenConfig changes the config object into a self-contained config (useful for making secrets).
+func FlattenConfig(config *Config) error {
+	for key, authInfo := range config.AuthInfos {
+		baseDir, err := MakeAbs(path.Dir(authInfo.LocationOfOrigin), "")
+		if err != nil {
+			return err
+		}
+
+		if err := FlattenContent(&authInfo.ClientCertificate, &authInfo.ClientCertificateData, baseDir); err != nil {
+			return err
+		}
+		if err := FlattenContent(&authInfo.ClientKey, &authInfo.ClientKeyData, baseDir); err != nil {
+			return err
+		}
+
+		config.AuthInfos[key] = authInfo
+	}
+	for key, cluster := range config.Clusters {
+		baseDir, err := MakeAbs(path.Dir(cluster.LocationOfOrigin), "")
+		if err != nil {
+			return err
+		}
+
+		if err := FlattenContent(&cluster.CertificateAuthority, &cluster.CertificateAuthorityData, baseDir); err != nil {
+			return err
+		}
+
+		config.Clusters[key] = cluster
+	}
+
+	return nil
+}
+
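+// Editorial note: a minimal sketch of how these helpers compose (not part of
+// the upstream source), assuming cfg is a *Config loaded by the caller:
+//
+//	if err := MinifyConfig(cfg); err != nil { /* handle error */ }
+//	if err := FlattenConfig(cfg); err != nil { /* handle error */ }
+//	ShortenConfig(cfg) // redact key material before printing
+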
+func FlattenContent(path *string, contents *[]byte, baseDir string) error {
+	if len(*path) != 0 {
+		if len(*contents) > 0 {
+			return errors.New("cannot have values for both path and contents")
+		}
+
+		var err error
+		absPath := ResolvePath(*path, baseDir)
+		*contents, err = ioutil.ReadFile(absPath)
+		if err != nil {
+			return err
+		}
+
+		*path = ""
+	}
+
+	return nil
+}
+
+// ResolvePath returns the path as an absolute path, resolved relative to the given base directory.
+func ResolvePath(path string, base string) string {
+	// Don't resolve empty paths
+	if len(path) > 0 {
+		// Don't resolve absolute paths
+		if !filepath.IsAbs(path) {
+			return filepath.Join(base, path)
+		}
+	}
+
+	return path
+}
+
+func MakeAbs(path, base string) (string, error) {
+	if filepath.IsAbs(path) {
+		return path, nil
+	}
+	if len(base) == 0 {
+		cwd, err := os.Getwd()
+		if err != nil {
+			return "", err
+		}
+		base = cwd
+	}
+	return filepath.Join(base, path), nil
+}
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go
new file mode 100644
index 0000000..5fbbe3f
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package latest
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer/json"
+	"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
+	"k8s.io/client-go/tools/clientcmd/api"
+	"k8s.io/client-go/tools/clientcmd/api/v1"
+)
+
+// Version is the string that represents the current external default version.
+const Version = "v1"
+
+var ExternalVersion = schema.GroupVersion{Group: "", Version: "v1"}
+
+// OldestVersion is the string that represents the oldest server version supported,
+// for client code that wants to hardcode the lowest common denominator.
+const OldestVersion = "v1"
+
+// Versions is the list of versions that are recognized in code. The order provided
+// may be assumed to be least feature rich to most feature rich, and clients may
+// choose to prefer the latter items in the list over the former items when presented
+// with a set of versions to choose from.
+var Versions = []string{"v1"}
+
+var (
+	Codec  runtime.Codec
+	Scheme *runtime.Scheme
+)
+
+func init() {
+	Scheme = runtime.NewScheme()
+	if err := api.AddToScheme(Scheme); err != nil {
+		// Programmer error, detect immediately
+		panic(err)
+	}
+	if err := v1.AddToScheme(Scheme); err != nil {
+		// Programmer error, detect immediately
+		panic(err)
+	}
+	yamlSerializer := json.NewYAMLSerializer(json.DefaultMetaFactory, Scheme, Scheme)
+	Codec = versioning.NewDefaultingCodecForScheme(
+		Scheme,
+		yamlSerializer,
+		yamlSerializer,
+		schema.GroupVersion{Version: Version},
+		runtime.InternalGroupVersioner,
+	)
+}
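The Codec assembled above is what round-trips kubeconfig files between YAML and the internal types. A hedged sketch of decoding raw bytes (the function name is illustrative; clientcmd's own loader does essentially this):

    import (
        "fmt"

        "k8s.io/client-go/tools/clientcmd/api"
        "k8s.io/client-go/tools/clientcmd/api/latest"
    )

    // decodeKubeconfig decodes kubeconfig YAML/JSON bytes into the internal api.Config.
    func decodeKubeconfig(data []byte) (*api.Config, error) {
        obj, _, err := latest.Codec.Decode(data, nil, nil)
        if err != nil {
            return nil, err
        }
        cfg, ok := obj.(*api.Config)
        if !ok {
            return nil, fmt.Errorf("unexpected object type %T", obj)
        }
        return cfg, nil
    }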
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/register.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/register.go
new file mode 100644
index 0000000..2eec388
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/register.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is the group version used to register these objects
+// TODO this should be in the "kubeconfig" group
+var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal}
+
+var (
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Config{},
+	)
+	return nil
+}
+
+func (obj *Config) GetObjectKind() schema.ObjectKind { return obj }
+func (obj *Config) SetGroupVersionKind(gvk schema.GroupVersionKind) {
+	obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
+}
+func (obj *Config) GroupVersionKind() schema.GroupVersionKind {
+	return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
+}
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
new file mode 100644
index 0000000..1391df7
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
@@ -0,0 +1,218 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// Where possible, json tags match the cli argument names.
+// Top level config objects and all values required for proper functioning are not "omitempty".  Any truly optional piece of config is allowed to be omitted.
+
+// Config holds the information needed to connect to remote kubernetes clusters as a given user
+// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type Config struct {
+	// Legacy field from pkg/api/types.go TypeMeta.
+	// TODO(jlowdermilk): remove this after eliminating downstream dependencies.
+	// +optional
+	Kind string `json:"kind,omitempty"`
+	// Legacy field from pkg/api/types.go TypeMeta.
+	// TODO(jlowdermilk): remove this after eliminating downstream dependencies.
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty"`
+	// Preferences holds general information to be used for cli interactions
+	Preferences Preferences `json:"preferences"`
+	// Clusters is a map of referenceable names to cluster configs
+	Clusters map[string]*Cluster `json:"clusters"`
+	// AuthInfos is a map of referenceable names to user configs
+	AuthInfos map[string]*AuthInfo `json:"users"`
+	// Contexts is a map of referenceable names to context configs
+	Contexts map[string]*Context `json:"contexts"`
+	// CurrentContext is the name of the context that you would like to use by default
+	CurrentContext string `json:"current-context"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
+type Preferences struct {
+	// +optional
+	Colors bool `json:"colors,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// Cluster contains information about how to communicate with a kubernetes cluster
+type Cluster struct {
+	// LocationOfOrigin indicates where this object came from.  It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// Server is the address of the kubernetes cluster (https://hostname:port).
+	Server string `json:"server"`
+	// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
+	// +optional
+	InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
+	// CertificateAuthority is the path to a cert file for the certificate authority.
+	// +optional
+	CertificateAuthority string `json:"certificate-authority,omitempty"`
+	// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
+	// +optional
+	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// AuthInfo contains information that describes identity.  This is used to tell the kubernetes cluster who you are.
+type AuthInfo struct {
+	// LocationOfOrigin indicates where this object came from.  It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// ClientCertificate is the path to a client cert file for TLS.
+	// +optional
+	ClientCertificate string `json:"client-certificate,omitempty"`
+	// ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
+	// +optional
+	ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
+	// ClientKey is the path to a client key file for TLS.
+	// +optional
+	ClientKey string `json:"client-key,omitempty"`
+	// ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
+	// +optional
+	ClientKeyData []byte `json:"client-key-data,omitempty"`
+	// Token is the bearer token for authentication to the kubernetes cluster.
+	// +optional
+	Token string `json:"token,omitempty"`
+	// TokenFile is a path to a file that contains a bearer token (as described above).  If both Token and TokenFile are present, Token takes precedence.
+	// +optional
+	TokenFile string `json:"tokenFile,omitempty"`
+	// Impersonate is the username to act-as.
+	// +optional
+	Impersonate string `json:"act-as,omitempty"`
+	// ImpersonateGroups is the groups to impersonate.
+	// +optional
+	ImpersonateGroups []string `json:"act-as-groups,omitempty"`
+	// ImpersonateUserExtra contains additional information for the impersonated user.
+	// +optional
+	ImpersonateUserExtra map[string][]string `json:"act-as-user-extra,omitempty"`
+	// Username is the username for basic authentication to the kubernetes cluster.
+	// +optional
+	Username string `json:"username,omitempty"`
+	// Password is the password for basic authentication to the kubernetes cluster.
+	// +optional
+	Password string `json:"password,omitempty"`
+	// AuthProvider specifies a custom authentication plugin for the kubernetes cluster.
+	// +optional
+	AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"`
+	// Exec specifies a custom exec-based authentication plugin for the kubernetes cluster.
+	// +optional
+	Exec *ExecConfig `json:"exec,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
+type Context struct {
+	// LocationOfOrigin indicates where this object came from.  It is used for round tripping config post-merge, but never serialized.
+	LocationOfOrigin string
+	// Cluster is the name of the cluster for this context
+	Cluster string `json:"cluster"`
+	// AuthInfo is the name of the authInfo for this context
+	AuthInfo string `json:"user"`
+	// Namespace is the default namespace to use on unspecified requests
+	// +optional
+	Namespace string `json:"namespace,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// AuthProviderConfig holds the configuration for a specified auth provider.
+type AuthProviderConfig struct {
+	Name string `json:"name"`
+	// +optional
+	Config map[string]string `json:"config,omitempty"`
+}
+
+// ExecConfig specifies a command to provide client credentials. The command is exec'd
+// and writes structured credentials to stdout.
+//
+// See the client.authentication.k8s.io API group for specifications of the exact input
+// and output format.
+type ExecConfig struct {
+	// Command to execute.
+	Command string `json:"command"`
+	// Arguments to pass to the command when executing it.
+	// +optional
+	Args []string `json:"args"`
+	// Env defines additional environment variables to expose to the process. These
+	// are unioned with the host's environment, as well as variables client-go uses
+	// to pass arguments to the plugin.
+	// +optional
+	Env []ExecEnvVar `json:"env"`
+
+	// Preferred input version of the ExecInfo. The returned ExecCredentials MUST use
+	// the same encoding version as the input.
+	APIVersion string `json:"apiVersion,omitempty"`
+}
+
+// ExecEnvVar is used for setting environment variables when executing an exec-based
+// credential plugin.
+type ExecEnvVar struct {
+	Name  string `json:"name"`
+	Value string `json:"value"`
+}
+
+// NewConfig is a convenience function that returns a new Config object with non-nil maps
+func NewConfig() *Config {
+	return &Config{
+		Preferences: *NewPreferences(),
+		Clusters:    make(map[string]*Cluster),
+		AuthInfos:   make(map[string]*AuthInfo),
+		Contexts:    make(map[string]*Context),
+		Extensions:  make(map[string]runtime.Object),
+	}
+}
+
+// NewContext is a convenience function that returns a new Context
+// object with non-nil maps
+func NewContext() *Context {
+	return &Context{Extensions: make(map[string]runtime.Object)}
+}
+
+// NewCluster is a convenience function that returns a new Cluster
+// object with non-nil maps
+func NewCluster() *Cluster {
+	return &Cluster{Extensions: make(map[string]runtime.Object)}
+}
+
+// NewAuthInfo is a convenience function that returns a new AuthInfo
+// object with non-nil maps
+func NewAuthInfo() *AuthInfo {
+	return &AuthInfo{
+		Extensions:           make(map[string]runtime.Object),
+		ImpersonateUserExtra: make(map[string][]string),
+	}
+}
+
+// NewPreferences is a convenience function that returns a new
+// Preferences object with non-nil maps
+func NewPreferences() *Preferences {
+	return &Preferences{Extensions: make(map[string]runtime.Object)}
+}
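A short sketch of assembling a config from these constructors; the cluster name, server, user, and token values are placeholders:

    import clientcmdapi "k8s.io/client-go/tools/clientcmd/api"

    func exampleConfig() *clientcmdapi.Config {
        // NewConfig returns non-nil maps, so the entries below can be assigned directly.
        cfg := clientcmdapi.NewConfig()
        cfg.Clusters["dev"] = &clientcmdapi.Cluster{Server: "https://dev.example.com:6443"}
        cfg.AuthInfos["admin"] = &clientcmdapi.AuthInfo{Token: "placeholder-token"}
        cfg.Contexts["dev-admin"] = &clientcmdapi.Context{Cluster: "dev", AuthInfo: "admin", Namespace: "default"}
        cfg.CurrentContext = "dev-admin"
        return cfg
    }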
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go
new file mode 100644
index 0000000..2d7142e
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go
@@ -0,0 +1,244 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"fmt"
+	"sort"
+
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/tools/clientcmd/api"
+)
+
+func addConversionFuncs(scheme *runtime.Scheme) error {
+	return scheme.AddConversionFuncs(
+		func(in *Cluster, out *api.Cluster, s conversion.Scope) error {
+			return s.DefaultConvert(in, out, conversion.IgnoreMissingFields)
+		},
+		func(in *api.Cluster, out *Cluster, s conversion.Scope) error {
+			return s.DefaultConvert(in, out, conversion.IgnoreMissingFields)
+		},
+		func(in *Preferences, out *api.Preferences, s conversion.Scope) error {
+			return s.DefaultConvert(in, out, conversion.IgnoreMissingFields)
+		},
+		func(in *api.Preferences, out *Preferences, s conversion.Scope) error {
+			return s.DefaultConvert(in, out, conversion.IgnoreMissingFields)
+		},
+		func(in *AuthInfo, out *api.AuthInfo, s conversion.Scope) error {
+			return s.DefaultConvert(in, out, conversion.IgnoreMissingFields)
+		},
+		func(in *api.AuthInfo, out *AuthInfo, s conversion.Scope) error {
+			return s.DefaultConvert(in, out, conversion.IgnoreMissingFields)
+		},
+		func(in *Context, out *api.Context, s conversion.Scope) error {
+			return s.DefaultConvert(in, out, conversion.IgnoreMissingFields)
+		},
+		func(in *api.Context, out *Context, s conversion.Scope) error {
+			return s.DefaultConvert(in, out, conversion.IgnoreMissingFields)
+		},
+
+		func(in *Config, out *api.Config, s conversion.Scope) error {
+			out.CurrentContext = in.CurrentContext
+			if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil {
+				return err
+			}
+
+			out.Clusters = make(map[string]*api.Cluster)
+			if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil {
+				return err
+			}
+			out.AuthInfos = make(map[string]*api.AuthInfo)
+			if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil {
+				return err
+			}
+			out.Contexts = make(map[string]*api.Context)
+			if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil {
+				return err
+			}
+			out.Extensions = make(map[string]runtime.Object)
+			if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil {
+				return err
+			}
+			return nil
+		},
+		func(in *api.Config, out *Config, s conversion.Scope) error {
+			out.CurrentContext = in.CurrentContext
+			if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil {
+				return err
+			}
+
+			out.Clusters = make([]NamedCluster, 0)
+			if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil {
+				return err
+			}
+			out.AuthInfos = make([]NamedAuthInfo, 0)
+			if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil {
+				return err
+			}
+			out.Contexts = make([]NamedContext, 0)
+			if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil {
+				return err
+			}
+			out.Extensions = make([]NamedExtension, 0)
+			if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil {
+				return err
+			}
+			return nil
+		},
+		func(in *[]NamedCluster, out *map[string]*api.Cluster, s conversion.Scope) error {
+			for _, curr := range *in {
+				newCluster := api.NewCluster()
+				if err := s.Convert(&curr.Cluster, newCluster, 0); err != nil {
+					return err
+				}
+				if (*out)[curr.Name] == nil {
+					(*out)[curr.Name] = newCluster
+				} else {
+					return fmt.Errorf("error converting *[]NamedCluster into *map[string]*api.Cluster: duplicate name \"%v\" in list: %v", curr.Name, *in)
+				}
+			}
+
+			return nil
+		},
+		func(in *map[string]*api.Cluster, out *[]NamedCluster, s conversion.Scope) error {
+			allKeys := make([]string, 0, len(*in))
+			for key := range *in {
+				allKeys = append(allKeys, key)
+			}
+			sort.Strings(allKeys)
+
+			for _, key := range allKeys {
+				newCluster := (*in)[key]
+				oldCluster := &Cluster{}
+				if err := s.Convert(newCluster, oldCluster, 0); err != nil {
+					return err
+				}
+
+				namedCluster := NamedCluster{key, *oldCluster}
+				*out = append(*out, namedCluster)
+			}
+
+			return nil
+		},
+		func(in *[]NamedAuthInfo, out *map[string]*api.AuthInfo, s conversion.Scope) error {
+			for _, curr := range *in {
+				newAuthInfo := api.NewAuthInfo()
+				if err := s.Convert(&curr.AuthInfo, newAuthInfo, 0); err != nil {
+					return err
+				}
+				if (*out)[curr.Name] == nil {
+					(*out)[curr.Name] = newAuthInfo
+				} else {
+					return fmt.Errorf("error converting *[]NamedAuthInfo into *map[string]*api.AuthInfo: duplicate name \"%v\" in list: %v", curr.Name, *in)
+				}
+			}
+
+			return nil
+		},
+		func(in *map[string]*api.AuthInfo, out *[]NamedAuthInfo, s conversion.Scope) error {
+			allKeys := make([]string, 0, len(*in))
+			for key := range *in {
+				allKeys = append(allKeys, key)
+			}
+			sort.Strings(allKeys)
+
+			for _, key := range allKeys {
+				newAuthInfo := (*in)[key]
+				oldAuthInfo := &AuthInfo{}
+				if err := s.Convert(newAuthInfo, oldAuthInfo, 0); err != nil {
+					return err
+				}
+
+				namedAuthInfo := NamedAuthInfo{key, *oldAuthInfo}
+				*out = append(*out, namedAuthInfo)
+			}
+
+			return nil
+		},
+		func(in *[]NamedContext, out *map[string]*api.Context, s conversion.Scope) error {
+			for _, curr := range *in {
+				newContext := api.NewContext()
+				if err := s.Convert(&curr.Context, newContext, 0); err != nil {
+					return err
+				}
+				if (*out)[curr.Name] == nil {
+					(*out)[curr.Name] = newContext
+				} else {
+					return fmt.Errorf("error converting *[]NamedContext into *map[string]*api.Context: duplicate name \"%v\" in list: %v", curr.Name, *in)
+				}
+			}
+
+			return nil
+		},
+		func(in *map[string]*api.Context, out *[]NamedContext, s conversion.Scope) error {
+			allKeys := make([]string, 0, len(*in))
+			for key := range *in {
+				allKeys = append(allKeys, key)
+			}
+			sort.Strings(allKeys)
+
+			for _, key := range allKeys {
+				newContext := (*in)[key]
+				oldContext := &Context{}
+				if err := s.Convert(newContext, oldContext, 0); err != nil {
+					return err
+				}
+
+				namedContext := NamedContext{key, *oldContext}
+				*out = append(*out, namedContext)
+			}
+
+			return nil
+		},
+		func(in *[]NamedExtension, out *map[string]runtime.Object, s conversion.Scope) error {
+			for _, curr := range *in {
+				var newExtension runtime.Object
+				if err := s.Convert(&curr.Extension, &newExtension, 0); err != nil {
+					return err
+				}
+				if (*out)[curr.Name] == nil {
+					(*out)[curr.Name] = newExtension
+				} else {
+					return fmt.Errorf("error converting *[]NamedExtension into *map[string]runtime.Object: duplicate name \"%v\" in list: %v", curr.Name, *in)
+				}
+			}
+
+			return nil
+		},
+		func(in *map[string]runtime.Object, out *[]NamedExtension, s conversion.Scope) error {
+			allKeys := make([]string, 0, len(*in))
+			for key := range *in {
+				allKeys = append(allKeys, key)
+			}
+			sort.Strings(allKeys)
+
+			for _, key := range allKeys {
+				newExtension := (*in)[key]
+				oldExtension := &runtime.RawExtension{}
+				if err := s.Convert(newExtension, oldExtension, 0); err != nil {
+					return err
+				}
+
+				namedExtension := NamedExtension{key, *oldExtension}
+				*out = append(*out, namedExtension)
+			}
+
+			return nil
+		},
+	)
+}
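These conversion funcs run indirectly through a runtime.Scheme. A sketch of converting the internal map-based Config into the v1 list-based form, assuming latest.Scheme (which registers both versions, as shown in latest.go above); the function name is illustrative:

    import (
        "k8s.io/client-go/tools/clientcmd/api"
        "k8s.io/client-go/tools/clientcmd/api/latest"
        clientcmdv1 "k8s.io/client-go/tools/clientcmd/api/v1"
    )

    func toV1(in *api.Config) (*clientcmdv1.Config, error) {
        out := &clientcmdv1.Config{}
        // Convert dispatches to the funcs registered above; maps become
        // Named* slices sorted by key.
        if err := latest.Scheme.Convert(in, out, nil); err != nil {
            return nil, err
        }
        return out, nil
    }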
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go
new file mode 100644
index 0000000..9750cf7
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+package v1
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go
new file mode 100644
index 0000000..7b91d50
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is the group version used to register these objects
+// TODO this should be in the "kubeconfig" group
+var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      runtime.SchemeBuilder
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register(addKnownTypes, addConversionFuncs)
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Config{},
+	)
+	return nil
+}
+
+func (obj *Config) GetObjectKind() schema.ObjectKind { return obj }
+func (obj *Config) SetGroupVersionKind(gvk schema.GroupVersionKind) {
+	obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
+}
+func (obj *Config) GroupVersionKind() schema.GroupVersionKind {
+	return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
+}
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go
new file mode 100644
index 0000000..56afb60
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go
@@ -0,0 +1,203 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// Where possible, json tags match the cli argument names.
+// Top level config objects and all values required for proper functioning are not "omitempty".  Any truly optional piece of config is allowed to be omitted.
+
+// Config holds the information needed to connect to remote kubernetes clusters as a given user
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type Config struct {
+	// Legacy field from pkg/api/types.go TypeMeta.
+	// TODO(jlowdermilk): remove this after eliminating downstream dependencies.
+	// +optional
+	Kind string `json:"kind,omitempty"`
+	// Legacy field from pkg/api/types.go TypeMeta.
+	// TODO(jlowdermilk): remove this after eliminating downstream dependencies.
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty"`
+	// Preferences holds general information to be used for cli interactions
+	Preferences Preferences `json:"preferences"`
+	// Clusters is a list of named cluster configs
+	Clusters []NamedCluster `json:"clusters"`
+	// AuthInfos is a list of named user configs
+	AuthInfos []NamedAuthInfo `json:"users"`
+	// Contexts is a list of named context configs
+	Contexts []NamedContext `json:"contexts"`
+	// CurrentContext is the name of the context that you would like to use by default
+	CurrentContext string `json:"current-context"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+type Preferences struct {
+	// +optional
+	Colors bool `json:"colors,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// Cluster contains information about how to communicate with a kubernetes cluster
+type Cluster struct {
+	// Server is the address of the kubernetes cluster (https://hostname:port).
+	Server string `json:"server"`
+	// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
+	// +optional
+	InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
+	// CertificateAuthority is the path to a cert file for the certificate authority.
+	// +optional
+	CertificateAuthority string `json:"certificate-authority,omitempty"`
+	// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
+	// +optional
+	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// AuthInfo contains information that describes identity.  This is used to tell the kubernetes cluster who you are.
+type AuthInfo struct {
+	// ClientCertificate is the path to a client cert file for TLS.
+	// +optional
+	ClientCertificate string `json:"client-certificate,omitempty"`
+	// ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
+	// +optional
+	ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
+	// ClientKey is the path to a client key file for TLS.
+	// +optional
+	ClientKey string `json:"client-key,omitempty"`
+	// ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
+	// +optional
+	ClientKeyData []byte `json:"client-key-data,omitempty"`
+	// Token is the bearer token for authentication to the kubernetes cluster.
+	// +optional
+	Token string `json:"token,omitempty"`
+	// TokenFile is a path to a file that contains a bearer token (as described above).  If both Token and TokenFile are present, Token takes precedence.
+	// +optional
+	TokenFile string `json:"tokenFile,omitempty"`
+	// Impersonate is the username to impersonate.  The name matches the flag.
+	// +optional
+	Impersonate string `json:"as,omitempty"`
+	// ImpersonateGroups is the groups to impersonate.
+	// +optional
+	ImpersonateGroups []string `json:"as-groups,omitempty"`
+	// ImpersonateUserExtra contains additional information for the impersonated user.
+	// +optional
+	ImpersonateUserExtra map[string][]string `json:"as-user-extra,omitempty"`
+	// Username is the username for basic authentication to the kubernetes cluster.
+	// +optional
+	Username string `json:"username,omitempty"`
+	// Password is the password for basic authentication to the kubernetes cluster.
+	// +optional
+	Password string `json:"password,omitempty"`
+	// AuthProvider specifies a custom authentication plugin for the kubernetes cluster.
+	// +optional
+	AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"`
+	// Exec specifies a custom exec-based authentication plugin for the kubernetes cluster.
+	// +optional
+	Exec *ExecConfig `json:"exec,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
+type Context struct {
+	// Cluster is the name of the cluster for this context
+	Cluster string `json:"cluster"`
+	// AuthInfo is the name of the authInfo for this context
+	AuthInfo string `json:"user"`
+	// Namespace is the default namespace to use on unspecified requests
+	// +optional
+	Namespace string `json:"namespace,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// NamedCluster relates nicknames to cluster information
+type NamedCluster struct {
+	// Name is the nickname for this Cluster
+	Name string `json:"name"`
+	// Cluster holds the cluster information
+	Cluster Cluster `json:"cluster"`
+}
+
+// NamedContext relates nicknames to context information
+type NamedContext struct {
+	// Name is the nickname for this Context
+	Name string `json:"name"`
+	// Context holds the context information
+	Context Context `json:"context"`
+}
+
+// NamedAuthInfo relates nicknames to auth information
+type NamedAuthInfo struct {
+	// Name is the nickname for this AuthInfo
+	Name string `json:"name"`
+	// AuthInfo holds the auth information
+	AuthInfo AuthInfo `json:"user"`
+}
+
+// NamedExtension relates nicknames to extension information
+type NamedExtension struct {
+	// Name is the nickname for this Extension
+	Name string `json:"name"`
+	// Extension holds the extension information
+	Extension runtime.RawExtension `json:"extension"`
+}
+
+// AuthProviderConfig holds the configuration for a specified auth provider.
+type AuthProviderConfig struct {
+	Name   string            `json:"name"`
+	Config map[string]string `json:"config"`
+}
+
+// ExecConfig specifies a command to provide client credentials. The command is exec'd
+// and writes structured credentials to stdout.
+//
+// See the client.authentication.k8s.io API group for specifications of the exact input
+// and output format.
+type ExecConfig struct {
+	// Command to execute.
+	Command string `json:"command"`
+	// Arguments to pass to the command when executing it.
+	// +optional
+	Args []string `json:"args"`
+	// Env defines additional environment variables to expose to the process. These
+	// are unioned with the host's environment, as well as variables client-go uses
+	// to pass arguments to the plugin.
+	// +optional
+	Env []ExecEnvVar `json:"env"`
+
+	// Preferred input version of the ExecInfo. The returned ExecCredentials MUST use
+	// the same encoding version as the input.
+	APIVersion string `json:"apiVersion,omitempty"`
+}
+
+// ExecEnvVar is used for setting environment variables when executing an exec-based
+// credential plugin.
+type ExecEnvVar struct {
+	Name  string `json:"name"`
+	Value string `json:"value"`
+}
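For reference, the json tags above give the familiar kubeconfig wire format; an illustrative document with placeholder names and values:

    apiVersion: v1
    kind: Config
    current-context: dev-admin
    clusters:
    - name: dev
      cluster:
        server: https://dev.example.com:6443
    contexts:
    - name: dev-admin
      context:
        cluster: dev
        user: admin
    users:
    - name: admin
      user:
        token: placeholder-token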
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..bb0f352
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go
@@ -0,0 +1,353 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthInfo) DeepCopyInto(out *AuthInfo) {
+	*out = *in
+	if in.ClientCertificateData != nil {
+		in, out := &in.ClientCertificateData, &out.ClientCertificateData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.ClientKeyData != nil {
+		in, out := &in.ClientKeyData, &out.ClientKeyData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.ImpersonateGroups != nil {
+		in, out := &in.ImpersonateGroups, &out.ImpersonateGroups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ImpersonateUserExtra != nil {
+		in, out := &in.ImpersonateUserExtra, &out.ImpersonateUserExtra
+		*out = make(map[string][]string, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = make([]string, len(val))
+				copy((*out)[key], val)
+			}
+		}
+	}
+	if in.AuthProvider != nil {
+		in, out := &in.AuthProvider, &out.AuthProvider
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(AuthProviderConfig)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	if in.Exec != nil {
+		in, out := &in.Exec, &out.Exec
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(ExecConfig)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make([]NamedExtension, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthInfo.
+func (in *AuthInfo) DeepCopy() *AuthInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthProviderConfig) DeepCopyInto(out *AuthProviderConfig) {
+	*out = *in
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthProviderConfig.
+func (in *AuthProviderConfig) DeepCopy() *AuthProviderConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthProviderConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Cluster) DeepCopyInto(out *Cluster) {
+	*out = *in
+	if in.CertificateAuthorityData != nil {
+		in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make([]NamedExtension, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
+func (in *Cluster) DeepCopy() *Cluster {
+	if in == nil {
+		return nil
+	}
+	out := new(Cluster)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Config) DeepCopyInto(out *Config) {
+	*out = *in
+	in.Preferences.DeepCopyInto(&out.Preferences)
+	if in.Clusters != nil {
+		in, out := &in.Clusters, &out.Clusters
+		*out = make([]NamedCluster, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AuthInfos != nil {
+		in, out := &in.AuthInfos, &out.AuthInfos
+		*out = make([]NamedAuthInfo, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Contexts != nil {
+		in, out := &in.Contexts, &out.Contexts
+		*out = make([]NamedContext, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make([]NamedExtension, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
+func (in *Config) DeepCopy() *Config {
+	if in == nil {
+		return nil
+	}
+	out := new(Config)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Config) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Context) DeepCopyInto(out *Context) {
+	*out = *in
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make([]NamedExtension, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Context.
+func (in *Context) DeepCopy() *Context {
+	if in == nil {
+		return nil
+	}
+	out := new(Context)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecConfig) DeepCopyInto(out *ExecConfig) {
+	*out = *in
+	if in.Args != nil {
+		in, out := &in.Args, &out.Args
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Env != nil {
+		in, out := &in.Env, &out.Env
+		*out = make([]ExecEnvVar, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecConfig.
+func (in *ExecConfig) DeepCopy() *ExecConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecEnvVar) DeepCopyInto(out *ExecEnvVar) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecEnvVar.
+func (in *ExecEnvVar) DeepCopy() *ExecEnvVar {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecEnvVar)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedAuthInfo) DeepCopyInto(out *NamedAuthInfo) {
+	*out = *in
+	in.AuthInfo.DeepCopyInto(&out.AuthInfo)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedAuthInfo.
+func (in *NamedAuthInfo) DeepCopy() *NamedAuthInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(NamedAuthInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedCluster) DeepCopyInto(out *NamedCluster) {
+	*out = *in
+	in.Cluster.DeepCopyInto(&out.Cluster)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCluster.
+func (in *NamedCluster) DeepCopy() *NamedCluster {
+	if in == nil {
+		return nil
+	}
+	out := new(NamedCluster)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedContext) DeepCopyInto(out *NamedContext) {
+	*out = *in
+	in.Context.DeepCopyInto(&out.Context)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedContext.
+func (in *NamedContext) DeepCopy() *NamedContext {
+	if in == nil {
+		return nil
+	}
+	out := new(NamedContext)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedExtension) DeepCopyInto(out *NamedExtension) {
+	*out = *in
+	in.Extension.DeepCopyInto(&out.Extension)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedExtension.
+func (in *NamedExtension) DeepCopy() *NamedExtension {
+	if in == nil {
+		return nil
+	}
+	out := new(NamedExtension)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Preferences) DeepCopyInto(out *Preferences) {
+	*out = *in
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make([]NamedExtension, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preferences.
+func (in *Preferences) DeepCopy() *Preferences {
+	if in == nil {
+		return nil
+	}
+	out := new(Preferences)
+	in.DeepCopyInto(out)
+	return out
+}
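A small sketch of what the generated code guarantees: a copy shares no slices or maps with the original, so mutating one never aliases the other (the function name and values are placeholders):

    import (
        "fmt"

        clientcmdv1 "k8s.io/client-go/tools/clientcmd/api/v1"
    )

    func deepCopyDemo() {
        orig := &clientcmdv1.AuthInfo{ClientKeyData: []byte("secret")}
        cp := orig.DeepCopy()
        cp.ClientKeyData[0] = 'X'
        fmt.Println(string(orig.ClientKeyData)) // still "secret": the bytes were copied
    }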
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
new file mode 100644
index 0000000..b90aa8d
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
@@ -0,0 +1,320 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package api
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthInfo) DeepCopyInto(out *AuthInfo) {
+	*out = *in
+	if in.ClientCertificateData != nil {
+		in, out := &in.ClientCertificateData, &out.ClientCertificateData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.ClientKeyData != nil {
+		in, out := &in.ClientKeyData, &out.ClientKeyData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.ImpersonateGroups != nil {
+		in, out := &in.ImpersonateGroups, &out.ImpersonateGroups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ImpersonateUserExtra != nil {
+		in, out := &in.ImpersonateUserExtra, &out.ImpersonateUserExtra
+		*out = make(map[string][]string, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = make([]string, len(val))
+				copy((*out)[key], val)
+			}
+		}
+	}
+	if in.AuthProvider != nil {
+		in, out := &in.AuthProvider, &out.AuthProvider
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(AuthProviderConfig)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	if in.Exec != nil {
+		in, out := &in.Exec, &out.Exec
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(ExecConfig)
+			(*in).DeepCopyInto(*out)
+		}
+	}
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make(map[string]runtime.Object, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = val.DeepCopyObject()
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthInfo.
+func (in *AuthInfo) DeepCopy() *AuthInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthProviderConfig) DeepCopyInto(out *AuthProviderConfig) {
+	*out = *in
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthProviderConfig.
+func (in *AuthProviderConfig) DeepCopy() *AuthProviderConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthProviderConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Cluster) DeepCopyInto(out *Cluster) {
+	*out = *in
+	if in.CertificateAuthorityData != nil {
+		in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make(map[string]runtime.Object, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = val.DeepCopyObject()
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
+func (in *Cluster) DeepCopy() *Cluster {
+	if in == nil {
+		return nil
+	}
+	out := new(Cluster)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Config) DeepCopyInto(out *Config) {
+	*out = *in
+	in.Preferences.DeepCopyInto(&out.Preferences)
+	if in.Clusters != nil {
+		in, out := &in.Clusters, &out.Clusters
+		*out = make(map[string]*Cluster, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = new(Cluster)
+				val.DeepCopyInto((*out)[key])
+			}
+		}
+	}
+	if in.AuthInfos != nil {
+		in, out := &in.AuthInfos, &out.AuthInfos
+		*out = make(map[string]*AuthInfo, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = new(AuthInfo)
+				val.DeepCopyInto((*out)[key])
+			}
+		}
+	}
+	if in.Contexts != nil {
+		in, out := &in.Contexts, &out.Contexts
+		*out = make(map[string]*Context, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = new(Context)
+				val.DeepCopyInto((*out)[key])
+			}
+		}
+	}
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make(map[string]runtime.Object, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = val.DeepCopyObject()
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
+func (in *Config) DeepCopy() *Config {
+	if in == nil {
+		return nil
+	}
+	out := new(Config)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Config) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Context) DeepCopyInto(out *Context) {
+	*out = *in
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make(map[string]runtime.Object, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = val.DeepCopyObject()
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Context.
+func (in *Context) DeepCopy() *Context {
+	if in == nil {
+		return nil
+	}
+	out := new(Context)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecConfig) DeepCopyInto(out *ExecConfig) {
+	*out = *in
+	if in.Args != nil {
+		in, out := &in.Args, &out.Args
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Env != nil {
+		in, out := &in.Env, &out.Env
+		*out = make([]ExecEnvVar, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecConfig.
+func (in *ExecConfig) DeepCopy() *ExecConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecEnvVar) DeepCopyInto(out *ExecEnvVar) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecEnvVar.
+func (in *ExecEnvVar) DeepCopy() *ExecEnvVar {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecEnvVar)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Preferences) DeepCopyInto(out *Preferences) {
+	*out = *in
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make(map[string]runtime.Object, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = val.DeepCopyObject()
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preferences.
+func (in *Preferences) DeepCopy() *Preferences {
+	if in == nil {
+		return nil
+	}
+	out := new(Preferences)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go
new file mode 100644
index 0000000..1d3c11d
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+
+	"golang.org/x/crypto/ssh/terminal"
+
+	clientauth "k8s.io/client-go/tools/auth"
+)
+
+// AuthLoader is used to build clientauth.Info objects.
+type AuthLoader interface {
+	// LoadAuth takes a path to a config file and can then do anything it needs in order to return a valid clientauth.Info
+	LoadAuth(path string) (*clientauth.Info, error)
+}
+
+// default implementation of an AuthLoader
+type defaultAuthLoader struct{}
+
+// LoadAuth for defaultAuthLoader simply delegates to clientauth.LoadFromFile
+func (*defaultAuthLoader) LoadAuth(path string) (*clientauth.Info, error) {
+	return clientauth.LoadFromFile(path)
+}
+
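+// PromptingAuthLoader is an AuthLoader that prompts on the given reader when the auth file does not exist yet.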
+type PromptingAuthLoader struct {
+	reader io.Reader
+}
+
+// LoadAuth parses a clientauth.Info object from a file path. It prompts the user and creates the file if it doesn't exist.
+func (a *PromptingAuthLoader) LoadAuth(path string) (*clientauth.Info, error) {
+	// Prompt for user/pass and write a file if none exists.
+	if _, err := os.Stat(path); os.IsNotExist(err) {
+		authPtr, err := a.Prompt()
+		if err != nil {
+			return nil, err
+		}
+		auth := *authPtr
+		data, err := json.Marshal(auth)
+		if err != nil {
+			return &auth, err
+		}
+		err = ioutil.WriteFile(path, data, 0600)
+		return &auth, err
+	}
+	authPtr, err := clientauth.LoadFromFile(path)
+	if err != nil {
+		return nil, err
+	}
+	return authPtr, nil
+}
+
+// Prompt pulls the username from the reader and reads the password from the terminal without echo.
+func (a *PromptingAuthLoader) Prompt() (*clientauth.Info, error) {
+	var err error
+	auth := &clientauth.Info{}
+	auth.User, err = promptForString("Username", a.reader, true)
+	if err != nil {
+		return nil, err
+	}
+	auth.Password, err = promptForString("Password", nil, false)
+	if err != nil {
+		return nil, err
+	}
+	return auth, nil
+}
+
+func promptForString(field string, r io.Reader, show bool) (result string, err error) {
+	fmt.Printf("Please enter %s: ", field)
+	if show {
+		_, err = fmt.Fscan(r, &result)
+	} else {
+		var data []byte
+		if terminal.IsTerminal(int(os.Stdin.Fd())) {
+			data, err = terminal.ReadPassword(int(os.Stdin.Fd()))
+			result = string(data)
+		} else {
+			return "", fmt.Errorf("error reading input for %s", field)
+		}
+	}
+	return result, err
+}
+
+// NewPromptingAuthLoader returns an AuthLoader that parses a clientauth.Info object from a file path, prompting the user and creating the file if it doesn't exist.
+func NewPromptingAuthLoader(reader io.Reader) *PromptingAuthLoader {
+	return &PromptingAuthLoader{reader}
+}
+
+// NewDefaultAuthLoader returns a default implementation of an AuthLoader that only reads from a config file
+func NewDefaultAuthLoader() AuthLoader {
+	return &defaultAuthLoader{}
+}
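A minimal sketch of how a caller might wire these loaders together (hypothetical auth-file path, abbreviated error handling): the default loader only reads an existing file, while the prompting loader falls back to stdin and persists what it collects.

	package main

	import (
		"fmt"
		"os"

		"k8s.io/client-go/tools/clientcmd"
	)

	func main() {
		// Default loader: read-only; fails if the file is absent.
		loader := clientcmd.NewDefaultAuthLoader()
		info, err := loader.LoadAuth("/tmp/kubernetes_auth") // placeholder path
		if err != nil {
			// Prompting loader: asks on stdin and writes the file if missing.
			prompting := clientcmd.NewPromptingAuthLoader(os.Stdin)
			if info, err = prompting.LoadAuth("/tmp/kubernetes_auth"); err != nil {
				panic(err)
			}
		}
		fmt.Println("loaded auth for user:", info.User)
	}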
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
new file mode 100644
index 0000000..66331a7
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
@@ -0,0 +1,572 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/url"
+	"os"
+	"strings"
+
+	"github.com/golang/glog"
+	"github.com/imdario/mergo"
+
+	restclient "k8s.io/client-go/rest"
+	clientauth "k8s.io/client-go/tools/auth"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+var (
+	// ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields
+	// DEPRECATED will be replaced
+	ClusterDefaults = clientcmdapi.Cluster{Server: getDefaultServer()}
+	// DefaultClientConfig represents the legacy behavior of this package for defaulting
+	// DEPRECATED will be replaced
+	DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{
+		ClusterDefaults: ClusterDefaults,
+	}, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}}
+)
+
+// getDefaultServer returns a default setting for DefaultClientConfig
+// DEPRECATED
+func getDefaultServer() string {
+	if server := os.Getenv("KUBERNETES_MASTER"); len(server) > 0 {
+		return server
+	}
+	return "http://localhost:8080"
+}
+
+// ClientConfig is used to make it easy to get an api server client
+type ClientConfig interface {
+	// RawConfig returns the merged result of all overrides
+	RawConfig() (clientcmdapi.Config, error)
+	// ClientConfig returns a complete client config
+	ClientConfig() (*restclient.Config, error)
+	// Namespace returns the namespace resulting from the merged
+	// result of all overrides and a boolean indicating if it was
+	// overridden
+	Namespace() (string, bool, error)
+	// ConfigAccess returns the rules for loading/persisting the config.
+	ConfigAccess() ConfigAccess
+}
+
+// PersistAuthProviderConfigForUser returns an AuthProviderConfigPersister for the given user
+type PersistAuthProviderConfigForUser func(user string) restclient.AuthProviderConfigPersister
+
+type promptedCredentials struct {
+	username string
+	password string
+}
+
+// DirectClientConfig is a ClientConfig implementation backed by a clientcmdapi.Config, option overrides, and an optional fallbackReader for auth information
+type DirectClientConfig struct {
+	config         clientcmdapi.Config
+	contextName    string
+	overrides      *ConfigOverrides
+	fallbackReader io.Reader
+	configAccess   ConfigAccess
+	// promptedCredentials store the credentials input by the user
+	promptedCredentials promptedCredentials
+}
+
+// NewDefaultClientConfig creates a DirectClientConfig using the config.CurrentContext as the context name
+func NewDefaultClientConfig(config clientcmdapi.Config, overrides *ConfigOverrides) ClientConfig {
+	return &DirectClientConfig{config, config.CurrentContext, overrides, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}}
+}
+
+// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information
+func NewNonInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, configAccess ConfigAccess) ClientConfig {
+	return &DirectClientConfig{config, contextName, overrides, nil, configAccess, promptedCredentials{}}
+}
+
+// NewInteractiveClientConfig creates a DirectClientConfig using the passed context name and a reader in case auth information is not provided via files or flags
+func NewInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, fallbackReader io.Reader, configAccess ConfigAccess) ClientConfig {
+	return &DirectClientConfig{config, contextName, overrides, fallbackReader, configAccess, promptedCredentials{}}
+}
+
+// NewClientConfigFromBytes takes your kubeconfig and gives you back a ClientConfig
+func NewClientConfigFromBytes(configBytes []byte) (ClientConfig, error) {
+	config, err := Load(configBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return &DirectClientConfig{*config, "", &ConfigOverrides{}, nil, nil, promptedCredentials{}}, nil
+}
+
+// RESTConfigFromKubeConfig is a convenience method to give back a restconfig from your kubeconfig bytes.
+// For programmatic access, this is what you want 80% of the time
+func RESTConfigFromKubeConfig(configBytes []byte) (*restclient.Config, error) {
+	clientConfig, err := NewClientConfigFromBytes(configBytes)
+	if err != nil {
+		return nil, err
+	}
+	return clientConfig.ClientConfig()
+}
+
+// RawConfig implements ClientConfig
+func (config *DirectClientConfig) RawConfig() (clientcmdapi.Config, error) {
+	return config.config, nil
+}
+
+// ClientConfig implements ClientConfig
+func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) {
+	// check that getAuthInfo, getContext, and getCluster do not return an error.
+	// Do this before checking if the current config is usable in the event that an
+	// AuthInfo, Context, or Cluster config with a user-defined name is not found.
+	// This provides the user with the immediate cause of the error if one is found.
+	configAuthInfo, err := config.getAuthInfo()
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = config.getContext()
+	if err != nil {
+		return nil, err
+	}
+
+	configClusterInfo, err := config.getCluster()
+	if err != nil {
+		return nil, err
+	}
+
+	if err := config.ConfirmUsable(); err != nil {
+		return nil, err
+	}
+
+	clientConfig := &restclient.Config{}
+	clientConfig.Host = configClusterInfo.Server
+
+	if len(config.overrides.Timeout) > 0 {
+		timeout, err := ParseTimeout(config.overrides.Timeout)
+		if err != nil {
+			return nil, err
+		}
+		clientConfig.Timeout = timeout
+	}
+
+	if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 {
+		u.RawQuery = ""
+		u.Fragment = ""
+		clientConfig.Host = u.String()
+	}
+	if len(configAuthInfo.Impersonate) > 0 {
+		clientConfig.Impersonate = restclient.ImpersonationConfig{
+			UserName: configAuthInfo.Impersonate,
+			Groups:   configAuthInfo.ImpersonateGroups,
+			Extra:    configAuthInfo.ImpersonateUserExtra,
+		}
+	}
+
+	// only try to read the auth information if we are secure
+	if restclient.IsConfigTransportTLS(*clientConfig) {
+		var err error
+
+		// mergo is first-write-wins for map values and last-write-wins for interface values
+		// NOTE: This behavior changed with https://github.com/imdario/mergo/commit/d304790b2ed594794496464fadd89d2bb266600a.
+		//       Our mergo.Merge version is older than this change.
+		var persister restclient.AuthProviderConfigPersister
+		if config.configAccess != nil {
+			authInfoName, _ := config.getAuthInfoName()
+			persister = PersisterForUser(config.configAccess, authInfoName)
+		}
+		userAuthPartialConfig, err := config.getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister)
+		if err != nil {
+			return nil, err
+		}
+		mergo.Merge(clientConfig, userAuthPartialConfig)
+
+		serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
+		if err != nil {
+			return nil, err
+		}
+		mergo.Merge(clientConfig, serverAuthPartialConfig)
+	}
+
+	return clientConfig, nil
+}
+
+// A clientauth.Info object contains both user identification and server identification.  We want different precedence
+// orders for both, so we have to split the objects and merge them separately.
+// We want this order of precedence for the server identification:
+// 1.  configClusterInfo (the final result of command line flags and merged .kubeconfig files)
+// 2.  configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3.  load the ~/.kubernetes_auth file as a default
+func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) {
+	mergedConfig := &restclient.Config{}
+
+	// configClusterInfo holds the information identifying the server, as provided by .kubeconfig
+	configClientConfig := &restclient.Config{}
+	configClientConfig.CAFile = configClusterInfo.CertificateAuthority
+	configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
+	configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
+	mergo.Merge(mergedConfig, configClientConfig)
+
+	return mergedConfig, nil
+}
+
+// A clientauth.Info object contains both user identification and server identification.  We want different precedence
+// orders for both, so we have to split the objects and merge them separately.
+// We want this order of precedence for user identification:
+// 1.  configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
+// 2.  configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3.  if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
+// 4.  if there is not enough information to identify the user, prompt if possible
+func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister) (*restclient.Config, error) {
+	mergedConfig := &restclient.Config{}
+
+	// blindly overwrite existing values based on precedence
+	if len(configAuthInfo.Token) > 0 {
+		mergedConfig.BearerToken = configAuthInfo.Token
+	} else if len(configAuthInfo.TokenFile) > 0 {
+		tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile)
+		if err != nil {
+			return nil, err
+		}
+		mergedConfig.BearerToken = string(tokenBytes)
+	}
+	if len(configAuthInfo.Impersonate) > 0 {
+		mergedConfig.Impersonate = restclient.ImpersonationConfig{
+			UserName: configAuthInfo.Impersonate,
+			Groups:   configAuthInfo.ImpersonateGroups,
+			Extra:    configAuthInfo.ImpersonateUserExtra,
+		}
+	}
+	if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
+		mergedConfig.CertFile = configAuthInfo.ClientCertificate
+		mergedConfig.CertData = configAuthInfo.ClientCertificateData
+		mergedConfig.KeyFile = configAuthInfo.ClientKey
+		mergedConfig.KeyData = configAuthInfo.ClientKeyData
+	}
+	if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
+		mergedConfig.Username = configAuthInfo.Username
+		mergedConfig.Password = configAuthInfo.Password
+	}
+	if configAuthInfo.AuthProvider != nil {
+		mergedConfig.AuthProvider = configAuthInfo.AuthProvider
+		mergedConfig.AuthConfigPersister = persistAuthConfig
+	}
+	if configAuthInfo.Exec != nil {
+		mergedConfig.ExecProvider = configAuthInfo.Exec
+	}
+
+	// if there still isn't enough information to authenticate the user, try prompting
+	if !canIdentifyUser(*mergedConfig) && (fallbackReader != nil) {
+		if len(config.promptedCredentials.username) > 0 && len(config.promptedCredentials.password) > 0 {
+			mergedConfig.Username = config.promptedCredentials.username
+			mergedConfig.Password = config.promptedCredentials.password
+			return mergedConfig, nil
+		}
+		prompter := NewPromptingAuthLoader(fallbackReader)
+		promptedAuthInfo, err := prompter.Prompt()
+		if err != nil {
+			return nil, err
+		}
+		promptedConfig := makeUserIdentificationConfig(*promptedAuthInfo)
+		previouslyMergedConfig := mergedConfig
+		mergedConfig = &restclient.Config{}
+		mergo.Merge(mergedConfig, promptedConfig)
+		mergo.Merge(mergedConfig, previouslyMergedConfig)
+		config.promptedCredentials.username = mergedConfig.Username
+		config.promptedCredentials.password = mergedConfig.Password
+	}
+
+	return mergedConfig, nil
+}
+
+// makeUserIdentificationConfig returns a restclient.Config capable of being merged using mergo for only user identification information
+func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config {
+	config := &restclient.Config{}
+	config.Username = info.User
+	config.Password = info.Password
+	config.CertFile = info.CertFile
+	config.KeyFile = info.KeyFile
+	config.BearerToken = info.BearerToken
+	return config
+}
+
+// makeServerIdentificationConfig returns a restclient.Config capable of being merged using mergo for only server identification information
+func makeServerIdentificationConfig(info clientauth.Info) restclient.Config {
+	config := restclient.Config{}
+	config.CAFile = info.CAFile
+	if info.Insecure != nil {
+		config.Insecure = *info.Insecure
+	}
+	return config
+}
+
+func canIdentifyUser(config restclient.Config) bool {
+	return len(config.Username) > 0 ||
+		(len(config.CertFile) > 0 || len(config.CertData) > 0) ||
+		len(config.BearerToken) > 0 ||
+		config.AuthProvider != nil ||
+		config.ExecProvider != nil
+}
+
+// Namespace implements ClientConfig
+func (config *DirectClientConfig) Namespace() (string, bool, error) {
+	if config.overrides != nil && config.overrides.Context.Namespace != "" {
+		// In the event we have an empty config but we do have a namespace override, we should return
+		// the namespace override instead of having config.ConfirmUsable() return an error. This allows
+		// things like in-cluster clients to execute `kubectl get pods --namespace=foo` and have the
+		// --namespace flag honored instead of being ignored.
+		return config.overrides.Context.Namespace, true, nil
+	}
+
+	if err := config.ConfirmUsable(); err != nil {
+		return "", false, err
+	}
+
+	configContext, err := config.getContext()
+	if err != nil {
+		return "", false, err
+	}
+
+	if len(configContext.Namespace) == 0 {
+		return "default", false, nil
+	}
+
+	return configContext.Namespace, false, nil
+}
+
+// ConfigAccess implements ClientConfig
+func (config *DirectClientConfig) ConfigAccess() ConfigAccess {
+	return config.configAccess
+}
+
+// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable.  There might still be errors in the config,
+// but no errors in the sections requested or referenced.  It does not return early so that it can find as many errors as possible.
+func (config *DirectClientConfig) ConfirmUsable() error {
+	validationErrors := make([]error, 0)
+
+	var contextName string
+	if len(config.contextName) != 0 {
+		contextName = config.contextName
+	} else {
+		contextName = config.config.CurrentContext
+	}
+
+	if len(contextName) > 0 {
+		_, exists := config.config.Contexts[contextName]
+		if !exists {
+			validationErrors = append(validationErrors, &errContextNotFound{contextName})
+		}
+	}
+
+	authInfoName, _ := config.getAuthInfoName()
+	authInfo, _ := config.getAuthInfo()
+	validationErrors = append(validationErrors, validateAuthInfo(authInfoName, authInfo)...)
+	clusterName, _ := config.getClusterName()
+	cluster, _ := config.getCluster()
+	validationErrors = append(validationErrors, validateClusterInfo(clusterName, cluster)...)
+	// when direct client config is specified, and our only error is that no server is defined, we should
+	// return a standard "no config" error
+	if len(validationErrors) == 1 && validationErrors[0] == ErrEmptyCluster {
+		return newErrConfigurationInvalid([]error{ErrEmptyConfig})
+	}
+	return newErrConfigurationInvalid(validationErrors)
+}
+
+// getContextName returns the default, or user-set context name, and a boolean that indicates
+// whether the default context name has been overwritten by a user-set flag, or left as its default value
+func (config *DirectClientConfig) getContextName() (string, bool) {
+	if len(config.overrides.CurrentContext) != 0 {
+		return config.overrides.CurrentContext, true
+	}
+	if len(config.contextName) != 0 {
+		return config.contextName, false
+	}
+
+	return config.config.CurrentContext, false
+}
+
+// getAuthInfoName returns a string containing the current authinfo name for the current context,
+// and a boolean indicating whether the default authInfo name is overwritten by a user-set flag, or
+// left as its default value
+func (config *DirectClientConfig) getAuthInfoName() (string, bool) {
+	if len(config.overrides.Context.AuthInfo) != 0 {
+		return config.overrides.Context.AuthInfo, true
+	}
+	context, _ := config.getContext()
+	return context.AuthInfo, false
+}
+
+// getClusterName returns a string containing the default, or user-set cluster name, and a boolean
+// indicating whether the default clusterName has been overwritten by a user-set flag, or left as
+// its default value
+func (config *DirectClientConfig) getClusterName() (string, bool) {
+	if len(config.overrides.Context.Cluster) != 0 {
+		return config.overrides.Context.Cluster, true
+	}
+	context, _ := config.getContext()
+	return context.Cluster, false
+}
+
+// getContext returns the clientcmdapi.Context, or an error if a required context is not found.
+func (config *DirectClientConfig) getContext() (clientcmdapi.Context, error) {
+	contexts := config.config.Contexts
+	contextName, required := config.getContextName()
+
+	mergedContext := clientcmdapi.NewContext()
+	if configContext, exists := contexts[contextName]; exists {
+		mergo.Merge(mergedContext, configContext)
+	} else if required {
+		return clientcmdapi.Context{}, fmt.Errorf("context %q does not exist", contextName)
+	}
+	mergo.Merge(mergedContext, config.overrides.Context)
+
+	return *mergedContext, nil
+}
+
+// getAuthInfo returns the clientcmdapi.AuthInfo, or an error if a required auth info is not found.
+func (config *DirectClientConfig) getAuthInfo() (clientcmdapi.AuthInfo, error) {
+	authInfos := config.config.AuthInfos
+	authInfoName, required := config.getAuthInfoName()
+
+	mergedAuthInfo := clientcmdapi.NewAuthInfo()
+	if configAuthInfo, exists := authInfos[authInfoName]; exists {
+		mergo.Merge(mergedAuthInfo, configAuthInfo)
+	} else if required {
+		return clientcmdapi.AuthInfo{}, fmt.Errorf("auth info %q does not exist", authInfoName)
+	}
+	mergo.Merge(mergedAuthInfo, config.overrides.AuthInfo)
+
+	return *mergedAuthInfo, nil
+}
+
+// getCluster returns the clientcmdapi.Cluster, or an error if a required cluster is not found.
+func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) {
+	clusterInfos := config.config.Clusters
+	clusterInfoName, required := config.getClusterName()
+
+	mergedClusterInfo := clientcmdapi.NewCluster()
+	mergo.Merge(mergedClusterInfo, config.overrides.ClusterDefaults)
+	if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
+		mergo.Merge(mergedClusterInfo, configClusterInfo)
+	} else if required {
+		return clientcmdapi.Cluster{}, fmt.Errorf("cluster %q does not exist", clusterInfoName)
+	}
+	mergo.Merge(mergedClusterInfo, config.overrides.ClusterInfo)
+	// An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data
+	// otherwise, a kubeconfig containing a CA reference would return an error that "CA and insecure-skip-tls-verify couldn't both be set"
+	caLen := len(config.overrides.ClusterInfo.CertificateAuthority)
+	caDataLen := len(config.overrides.ClusterInfo.CertificateAuthorityData)
+	if config.overrides.ClusterInfo.InsecureSkipTLSVerify && caLen == 0 && caDataLen == 0 {
+		mergedClusterInfo.CertificateAuthority = ""
+		mergedClusterInfo.CertificateAuthorityData = nil
+	}
+
+	return *mergedClusterInfo, nil
+}
+
+// inClusterClientConfig makes a config that will work from within a kubernetes cluster container environment.
+// Can take options overrides for flags explicitly provided to the command inside the cluster container.
+type inClusterClientConfig struct {
+	overrides               *ConfigOverrides
+	inClusterConfigProvider func() (*restclient.Config, error)
+}
+
+var _ ClientConfig = &inClusterClientConfig{}
+
+func (config *inClusterClientConfig) RawConfig() (clientcmdapi.Config, error) {
+	return clientcmdapi.Config{}, fmt.Errorf("inCluster environment config doesn't support multiple clusters")
+}
+
+func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error) {
+	if config.inClusterConfigProvider == nil {
+		config.inClusterConfigProvider = restclient.InClusterConfig
+	}
+
+	icc, err := config.inClusterConfigProvider()
+	if err != nil {
+		return nil, err
+	}
+
+	// in-cluster configs only take a host, token, or CA file
+	// if any of them were individually provided, overwrite anything else
+	if config.overrides != nil {
+		if server := config.overrides.ClusterInfo.Server; len(server) > 0 {
+			icc.Host = server
+		}
+		if token := config.overrides.AuthInfo.Token; len(token) > 0 {
+			icc.BearerToken = token
+		}
+		if certificateAuthorityFile := config.overrides.ClusterInfo.CertificateAuthority; len(certificateAuthorityFile) > 0 {
+			icc.TLSClientConfig.CAFile = certificateAuthorityFile
+		}
+	}
+
+	return icc, err
+}
+
+func (config *inClusterClientConfig) Namespace() (string, bool, error) {
+	// This way assumes you've set the POD_NAMESPACE environment variable using the downward API.
+	// This check has to be done first for backwards compatibility with the way InClusterConfig was originally set up
+	if ns := os.Getenv("POD_NAMESPACE"); ns != "" {
+		return ns, false, nil
+	}
+
+	// Fall back to the namespace associated with the service account token, if available
+	if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
+		if ns := strings.TrimSpace(string(data)); len(ns) > 0 {
+			return ns, false, nil
+		}
+	}
+
+	return "default", false, nil
+}
+
+func (config *inClusterClientConfig) ConfigAccess() ConfigAccess {
+	return NewDefaultClientConfigLoadingRules()
+}
+
+// Possible returns true if loading an in-cluster config is possible.
+func (config *inClusterClientConfig) Possible() bool {
+	fi, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token")
+	return os.Getenv("KUBERNETES_SERVICE_HOST") != "" &&
+		os.Getenv("KUBERNETES_SERVICE_PORT") != "" &&
+		err == nil && !fi.IsDir()
+}
+
+// BuildConfigFromFlags is a helper function that builds configs from a master
+// url or a kubeconfig filepath. These are passed in as command line flags for cluster
+// components. Warnings should reflect this usage. If neither masterUrl nor kubeconfigPath
+// is passed in, we fall back to inClusterConfig. If inClusterConfig fails, we fall back
+// to the default config.
+func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, error) {
+	if kubeconfigPath == "" && masterUrl == "" {
+		glog.Warningf("Neither --kubeconfig nor --master was specified.  Using the inClusterConfig.  This might not work.")
+		kubeconfig, err := restclient.InClusterConfig()
+		if err == nil {
+			return kubeconfig, nil
+		}
+		glog.Warning("error creating inClusterConfig, falling back to default config: ", err)
+	}
+	return NewNonInteractiveDeferredLoadingClientConfig(
+		&ClientConfigLoadingRules{ExplicitPath: kubeconfigPath},
+		&ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}).ClientConfig()
+}
+
+// BuildConfigFromKubeconfigGetter is a helper function that builds configs from a master
+// url and a kubeconfigGetter.
+func BuildConfigFromKubeconfigGetter(masterUrl string, kubeconfigGetter KubeconfigGetter) (*restclient.Config, error) {
+	// TODO: We do not need a DeferredLoader here. Refactor code and see if we can use DirectClientConfig here.
+	cc := NewNonInteractiveDeferredLoadingClientConfig(
+		&ClientConfigGetter{kubeconfigGetter: kubeconfigGetter},
+		&ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}})
+	return cc.ClientConfig()
+}
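RESTConfigFromKubeConfig and BuildConfigFromFlags above are the usual programmatic entry points into this file. A minimal sketch, with placeholder paths and abbreviated error handling:

	package main

	import (
		"io/ioutil"

		"k8s.io/client-go/tools/clientcmd"
	)

	func main() {
		// From raw kubeconfig bytes (e.g. read from disk or a Secret).
		data, err := ioutil.ReadFile("/path/to/kubeconfig") // placeholder path
		if err != nil {
			panic(err)
		}
		cfg, err := clientcmd.RESTConfigFromKubeConfig(data)
		if err != nil {
			panic(err)
		}
		_ = cfg

		// From flag-style inputs; an empty master URL defers to the kubeconfig,
		// and empty values for both fall back to the in-cluster config.
		cfg2, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
		if err != nil {
			panic(err)
		}
		_ = cfg2
	}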
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/config.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/config.go
new file mode 100644
index 0000000..7092c5b
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/config.go
@@ -0,0 +1,474 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"errors"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	"sort"
+
+	"github.com/golang/glog"
+
+	restclient "k8s.io/client-go/rest"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+// ConfigAccess is used by subcommands and methods in this package to load and modify the appropriate config files
+type ConfigAccess interface {
+	// GetLoadingPrecedence returns the slice of files that should be used for loading and inspecting the config
+	GetLoadingPrecedence() []string
+	// GetStartingConfig returns the config that subcommands should begin operating against.  It may or may not be merged depending on loading rules
+	GetStartingConfig() (*clientcmdapi.Config, error)
+	// GetDefaultFilename returns the name of the file you should write into (create if necessary), if you're trying to create a new stanza as opposed to updating an existing one.
+	GetDefaultFilename() string
+	// IsExplicitFile indicates whether or not this command is interested in exactly one file.  This implementation only ever does that via a flag, but implementations that handle local, global, and flags may have more
+	IsExplicitFile() bool
+	// GetExplicitFile returns the particular file this command is operating against.  This implementation only ever has one, but implementations that handle local, global, and flags may have more
+	GetExplicitFile() string
+}
+
+// PathOptions holds the options for locating and writing kubeconfig files
+type PathOptions struct {
+	// GlobalFile is the full path to the file to load as the global (final) option
+	GlobalFile string
+	// EnvVar is the env var name that points to the list of kubeconfig files to load
+	EnvVar string
+	// ExplicitFileFlag is the name of the flag to use for prompting for the kubeconfig file
+	ExplicitFileFlag string
+
+	// GlobalFileSubpath is an optional value used for displaying help
+	GlobalFileSubpath string
+
+	LoadingRules *ClientConfigLoadingRules
+}
+
+func (o *PathOptions) GetEnvVarFiles() []string {
+	if len(o.EnvVar) == 0 {
+		return []string{}
+	}
+
+	envVarValue := os.Getenv(o.EnvVar)
+	if len(envVarValue) == 0 {
+		return []string{}
+	}
+
+	fileList := filepath.SplitList(envVarValue)
+	// prevent the same path from being loaded multiple times
+	return deduplicate(fileList)
+}
+
+func (o *PathOptions) GetLoadingPrecedence() []string {
+	if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 {
+		return envVarFiles
+	}
+
+	return []string{o.GlobalFile}
+}
+
+func (o *PathOptions) GetStartingConfig() (*clientcmdapi.Config, error) {
+	// don't mutate the original
+	loadingRules := *o.LoadingRules
+	loadingRules.Precedence = o.GetLoadingPrecedence()
+
+	clientConfig := NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, &ConfigOverrides{})
+	rawConfig, err := clientConfig.RawConfig()
+	if os.IsNotExist(err) {
+		return clientcmdapi.NewConfig(), nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return &rawConfig, nil
+}
+
+func (o *PathOptions) GetDefaultFilename() string {
+	if o.IsExplicitFile() {
+		return o.GetExplicitFile()
+	}
+
+	if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 {
+		if len(envVarFiles) == 1 {
+			return envVarFiles[0]
+		}
+
+		// if any of the envvar files already exists, return it
+		for _, envVarFile := range envVarFiles {
+			if _, err := os.Stat(envVarFile); err == nil {
+				return envVarFile
+			}
+		}
+
+		// otherwise, return the last one in the list
+		return envVarFiles[len(envVarFiles)-1]
+	}
+
+	return o.GlobalFile
+}
+
+func (o *PathOptions) IsExplicitFile() bool {
+	if len(o.LoadingRules.ExplicitPath) > 0 {
+		return true
+	}
+
+	return false
+}
+
+func (o *PathOptions) GetExplicitFile() string {
+	return o.LoadingRules.ExplicitPath
+}
+
+func NewDefaultPathOptions() *PathOptions {
+	ret := &PathOptions{
+		GlobalFile:       RecommendedHomeFile,
+		EnvVar:           RecommendedConfigPathEnvVar,
+		ExplicitFileFlag: RecommendedConfigPathFlag,
+
+		GlobalFileSubpath: path.Join(RecommendedHomeDir, RecommendedFileName),
+
+		LoadingRules: NewDefaultClientConfigLoadingRules(),
+	}
+	ret.LoadingRules.DoNotResolvePaths = true
+
+	return ret
+}
+
+// ModifyConfig takes a Config object, iterates through Clusters, AuthInfos, and Contexts, uses the LocationOfOrigin if specified or
+// uses the default destination file to write the results into.  This results in multiple file reads, but it's very easy to follow.
+// Preferences and CurrentContext should always be set in the default destination file.  Since we can't distinguish between empty and missing values
+// (no nil strings), we're forced to have separate handling for them.  In the kubeconfig cases, newConfig should have at most one difference,
+// that means that this code will only write into a single file.  If you want to relativizePaths, you must provide a fully qualified path in any
+// modified element.
+func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, relativizePaths bool) error {
+	possibleSources := configAccess.GetLoadingPrecedence()
+	// sort the possible kubeconfig files so we always "lock" in the same order
+	// to avoid deadlock (note: this can fail w/ symlinks, but... come on).
+	sort.Strings(possibleSources)
+	for _, filename := range possibleSources {
+		if err := lockFile(filename); err != nil {
+			return err
+		}
+		defer unlockFile(filename)
+	}
+
+	startingConfig, err := configAccess.GetStartingConfig()
+	if err != nil {
+		return err
+	}
+
+	// We need to find all differences, locate their original files, read a partial config to modify only that stanza and write out the file.
+	// Special case the test for current context and preferences since those always write to the default file.
+	if reflect.DeepEqual(*startingConfig, newConfig) {
+		// nothing to do
+		return nil
+	}
+
+	if startingConfig.CurrentContext != newConfig.CurrentContext {
+		if err := writeCurrentContext(configAccess, newConfig.CurrentContext); err != nil {
+			return err
+		}
+	}
+
+	if !reflect.DeepEqual(startingConfig.Preferences, newConfig.Preferences) {
+		if err := writePreferences(configAccess, newConfig.Preferences); err != nil {
+			return err
+		}
+	}
+
+	// Search every cluster, authInfo, and context.  First from new to old for differences, then from old to new for deletions
+	for key, cluster := range newConfig.Clusters {
+		startingCluster, exists := startingConfig.Clusters[key]
+		if !reflect.DeepEqual(cluster, startingCluster) || !exists {
+			destinationFile := cluster.LocationOfOrigin
+			if len(destinationFile) == 0 {
+				destinationFile = configAccess.GetDefaultFilename()
+			}
+
+			configToWrite, err := getConfigFromFile(destinationFile)
+			if err != nil {
+				return err
+			}
+			t := *cluster
+
+			configToWrite.Clusters[key] = &t
+			configToWrite.Clusters[key].LocationOfOrigin = destinationFile
+			if relativizePaths {
+				if err := RelativizeClusterLocalPaths(configToWrite.Clusters[key]); err != nil {
+					return err
+				}
+			}
+
+			if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+				return err
+			}
+		}
+	}
+
+	for key, context := range newConfig.Contexts {
+		startingContext, exists := startingConfig.Contexts[key]
+		if !reflect.DeepEqual(context, startingContext) || !exists {
+			destinationFile := context.LocationOfOrigin
+			if len(destinationFile) == 0 {
+				destinationFile = configAccess.GetDefaultFilename()
+			}
+
+			configToWrite, err := getConfigFromFile(destinationFile)
+			if err != nil {
+				return err
+			}
+			configToWrite.Contexts[key] = context
+
+			if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+				return err
+			}
+		}
+	}
+
+	for key, authInfo := range newConfig.AuthInfos {
+		startingAuthInfo, exists := startingConfig.AuthInfos[key]
+		if !reflect.DeepEqual(authInfo, startingAuthInfo) || !exists {
+			destinationFile := authInfo.LocationOfOrigin
+			if len(destinationFile) == 0 {
+				destinationFile = configAccess.GetDefaultFilename()
+			}
+
+			configToWrite, err := getConfigFromFile(destinationFile)
+			if err != nil {
+				return err
+			}
+			t := *authInfo
+			configToWrite.AuthInfos[key] = &t
+			configToWrite.AuthInfos[key].LocationOfOrigin = destinationFile
+			if relativizePaths {
+				if err := RelativizeAuthInfoLocalPaths(configToWrite.AuthInfos[key]); err != nil {
+					return err
+				}
+			}
+
+			if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+				return err
+			}
+		}
+	}
+
+	for key, cluster := range startingConfig.Clusters {
+		if _, exists := newConfig.Clusters[key]; !exists {
+			destinationFile := cluster.LocationOfOrigin
+			if len(destinationFile) == 0 {
+				destinationFile = configAccess.GetDefaultFilename()
+			}
+
+			configToWrite, err := getConfigFromFile(destinationFile)
+			if err != nil {
+				return err
+			}
+			delete(configToWrite.Clusters, key)
+
+			if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+				return err
+			}
+		}
+	}
+
+	for key, context := range startingConfig.Contexts {
+		if _, exists := newConfig.Contexts[key]; !exists {
+			destinationFile := context.LocationOfOrigin
+			if len(destinationFile) == 0 {
+				destinationFile = configAccess.GetDefaultFilename()
+			}
+
+			configToWrite, err := getConfigFromFile(destinationFile)
+			if err != nil {
+				return err
+			}
+			delete(configToWrite.Contexts, key)
+
+			if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+				return err
+			}
+		}
+	}
+
+	for key, authInfo := range startingConfig.AuthInfos {
+		if _, exists := newConfig.AuthInfos[key]; !exists {
+			destinationFile := authInfo.LocationOfOrigin
+			if len(destinationFile) == 0 {
+				destinationFile = configAccess.GetDefaultFilename()
+			}
+
+			configToWrite, err := getConfigFromFile(destinationFile)
+			if err != nil {
+				return err
+			}
+			delete(configToWrite.AuthInfos, key)
+
+			if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func PersisterForUser(configAccess ConfigAccess, user string) restclient.AuthProviderConfigPersister {
+	return &persister{configAccess, user}
+}
+
+type persister struct {
+	configAccess ConfigAccess
+	user         string
+}
+
+func (p *persister) Persist(config map[string]string) error {
+	newConfig, err := p.configAccess.GetStartingConfig()
+	if err != nil {
+		return err
+	}
+	authInfo, ok := newConfig.AuthInfos[p.user]
+	if ok && authInfo.AuthProvider != nil {
+		authInfo.AuthProvider.Config = config
+		// propagate any write error instead of silently dropping it
+		return ModifyConfig(p.configAccess, *newConfig, false)
+	}
+	return nil
+}
+
+// writeCurrentContext takes three possible paths.
+// If newCurrentContext is the same as the startingConfig's current context, then we exit.
+// If newCurrentContext has a value, then that value is written into the default destination file.
+// If newCurrentContext is empty, then we find the config file that is setting the CurrentContext and clear the value from that file
+func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error {
+	if startingConfig, err := configAccess.GetStartingConfig(); err != nil {
+		return err
+	} else if startingConfig.CurrentContext == newCurrentContext {
+		return nil
+	}
+
+	if configAccess.IsExplicitFile() {
+		file := configAccess.GetExplicitFile()
+		currConfig, err := getConfigFromFile(file)
+		if err != nil {
+			return err
+		}
+		currConfig.CurrentContext = newCurrentContext
+		if err := WriteToFile(*currConfig, file); err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	if len(newCurrentContext) > 0 {
+		destinationFile := configAccess.GetDefaultFilename()
+		config, err := getConfigFromFile(destinationFile)
+		if err != nil {
+			return err
+		}
+		config.CurrentContext = newCurrentContext
+
+		if err := WriteToFile(*config, destinationFile); err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	// we're supposed to be clearing the current context.  We need to find the first spot in the chain that is setting it and clear it
+	for _, file := range configAccess.GetLoadingPrecedence() {
+		if _, err := os.Stat(file); err == nil {
+			currConfig, err := getConfigFromFile(file)
+			if err != nil {
+				return err
+			}
+
+			if len(currConfig.CurrentContext) > 0 {
+				currConfig.CurrentContext = newCurrentContext
+				if err := WriteToFile(*currConfig, file); err != nil {
+					return err
+				}
+
+				return nil
+			}
+		}
+	}
+
+	return errors.New("no config found to write context")
+}
+
+func writePreferences(configAccess ConfigAccess, newPrefs clientcmdapi.Preferences) error {
+	if startingConfig, err := configAccess.GetStartingConfig(); err != nil {
+		return err
+	} else if reflect.DeepEqual(startingConfig.Preferences, newPrefs) {
+		return nil
+	}
+
+	if configAccess.IsExplicitFile() {
+		file := configAccess.GetExplicitFile()
+		currConfig, err := getConfigFromFile(file)
+		if err != nil {
+			return err
+		}
+		currConfig.Preferences = newPrefs
+		if err := WriteToFile(*currConfig, file); err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	for _, file := range configAccess.GetLoadingPrecedence() {
+		currConfig, err := getConfigFromFile(file)
+		if err != nil {
+			return err
+		}
+
+		if !reflect.DeepEqual(currConfig.Preferences, newPrefs) {
+			currConfig.Preferences = newPrefs
+			if err := WriteToFile(*currConfig, file); err != nil {
+				return err
+			}
+
+			return nil
+		}
+	}
+
+	return errors.New("no config found to write preferences")
+}
+
+// getConfigFromFile tries to read a kubeconfig file and if it can't, returns an error.  One exception: missing files result in empty configs, not an error.
+func getConfigFromFile(filename string) (*clientcmdapi.Config, error) {
+	config, err := LoadFromFile(filename)
+	if err != nil && !os.IsNotExist(err) {
+		return nil, err
+	}
+	if config == nil {
+		config = clientcmdapi.NewConfig()
+	}
+	return config, nil
+}
+
+// GetConfigFromFileOrDie tries to read a kubeconfig file and if it can't, it calls exit.  One exception: missing files result in empty configs, not an exit
+func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config {
+	config, err := getConfigFromFile(filename)
+	if err != nil {
+		glog.FatalDepth(1, err)
+	}
+
+	return config
+}
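A sketch of how a kubectl-style caller might persist a change through ModifyConfig (the context name is hypothetical); ModifyConfig locates the file that owns the changed stanza and rewrites only that file:

	package main

	import (
		"k8s.io/client-go/tools/clientcmd"
	)

	func main() {
		pathOptions := clientcmd.NewDefaultPathOptions()

		config, err := pathOptions.GetStartingConfig()
		if err != nil {
			panic(err)
		}

		// Switch the current context and write it back.
		config.CurrentContext = "my-context" // hypothetical context name
		if err := clientcmd.ModifyConfig(pathOptions, *config, false); err != nil {
			panic(err)
		}
	}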
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/doc.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/doc.go
new file mode 100644
index 0000000..424311e
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/doc.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package clientcmd provides one stop shopping for building a working client from a fixed config,
+from a .kubeconfig file, from command line flags, or from any merged combination.
+
+Sample usage from merged .kubeconfig files (local directory, home directory)
+
+	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
+	// if you want to change the loading rules (which files in which order), you can do so here
+
+	configOverrides := &clientcmd.ConfigOverrides{}
+	// if you want to change override values or bind them to flags, there are methods to help you
+
+	kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
+	config, err := kubeConfig.ClientConfig()
+	if err != nil {
+		// Do something
+	}
+	client, err := kubernetes.NewForConfig(config)
+	// ...
+*/
+package clientcmd // import "k8s.io/client-go/tools/clientcmd"
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/flag.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/flag.go
new file mode 100644
index 0000000..8d60d20
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/flag.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+// transformingStringValue implements pflag.Value to store string values,
+// allowing transforming them while being set
+type transformingStringValue struct {
+	target      *string
+	transformer func(string) (string, error)
+}
+
+func newTransformingStringValue(val string, target *string, transformer func(string) (string, error)) *transformingStringValue {
+	*target = val
+	return &transformingStringValue{
+		target:      target,
+		transformer: transformer,
+	}
+}
+
+func (t *transformingStringValue) Set(val string) error {
+	val, err := t.transformer(val)
+	if err != nil {
+		return err
+	}
+	*t.target = val
+	return nil
+}
+
+func (t *transformingStringValue) Type() string {
+	return "string"
+}
+
+func (t *transformingStringValue) String() string {
+	return string(*t.target)
+}
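newTransformingStringValue is unexported, but the pattern is ordinary pflag.Value plumbing. A standalone sketch of the same idea, assuming github.com/spf13/pflag and a hypothetical lower-casing transformer:

	package main

	import (
		"fmt"
		"strings"

		"github.com/spf13/pflag"
	)

	// lowerValue mirrors transformingStringValue: it stores into target,
	// transforming the value as it is set.
	type lowerValue struct{ target *string }

	func (v *lowerValue) Set(s string) error { *v.target = strings.ToLower(s); return nil }
	func (v *lowerValue) Type() string       { return "string" }
	func (v *lowerValue) String() string     { return *v.target }

	func main() {
		var ns string
		fs := pflag.NewFlagSet("demo", pflag.ExitOnError)
		fs.Var(&lowerValue{&ns}, "namespace", "namespace (lower-cased on set)")
		fs.Parse([]string{"--namespace=Kube-System"})
		fmt.Println(ns) // prints "kube-system"
	}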
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/helpers.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/helpers.go
new file mode 100644
index 0000000..b609d1a
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/helpers.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"fmt"
+	"strconv"
+	"time"
+)
+
+// ParseTimeout returns a parsed duration from a string.
+// A duration string must be a non-negative integer (interpreted as seconds), or a value parseable by time.ParseDuration (e.g. 1s, 2m, 3h).
+func ParseTimeout(duration string) (time.Duration, error) {
+	if i, err := strconv.ParseInt(duration, 10, 64); err == nil && i >= 0 {
+		return (time.Duration(i) * time.Second), nil
+	}
+	if requestTimeout, err := time.ParseDuration(duration); err == nil {
+		return requestTimeout, nil
+	}
+	return 0, fmt.Errorf("Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)")
+}
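ParseTimeout accepts exactly two forms, as the code above shows: a bare non-negative integer (interpreted as seconds) or anything time.ParseDuration accepts. A short usage sketch:

	package main

	import (
		"fmt"

		"k8s.io/client-go/tools/clientcmd"
	)

	func main() {
		for _, v := range []string{"30", "2m", "1h"} {
			d, err := clientcmd.ParseTimeout(v)
			if err != nil {
				panic(err)
			}
			fmt.Printf("%s -> %v\n", v, d) // 30 -> 30s, 2m -> 2m0s, 1h -> 1h0m0s
		}
	}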
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/loader.go
new file mode 100644
index 0000000..3442475
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/loader.go
@@ -0,0 +1,633 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	goruntime "runtime"
+	"strings"
+
+	"github.com/golang/glog"
+	"github.com/imdario/mergo"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	restclient "k8s.io/client-go/rest"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+	clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest"
+	"k8s.io/client-go/util/homedir"
+)
+
+const (
+	RecommendedConfigPathFlag   = "kubeconfig"
+	RecommendedConfigPathEnvVar = "KUBECONFIG"
+	RecommendedHomeDir          = ".kube"
+	RecommendedFileName         = "config"
+	RecommendedSchemaName       = "schema"
+)
+
+var (
+	RecommendedConfigDir  = path.Join(homedir.HomeDir(), RecommendedHomeDir)
+	RecommendedHomeFile   = path.Join(RecommendedConfigDir, RecommendedFileName)
+	RecommendedSchemaFile = path.Join(RecommendedConfigDir, RecommendedSchemaName)
+)
+
+// currentMigrationRules returns a map that holds the history of recommended home directories used in previous versions.
+// Any future changes to RecommendedHomeFile and related are expected to add a migration rule here, in order to make
+// sure existing config files are migrated to their new locations properly.
+func currentMigrationRules() map[string]string {
+	oldRecommendedHomeFile := path.Join(os.Getenv("HOME"), "/.kube/.kubeconfig")
+	oldRecommendedWindowsHomeFile := path.Join(os.Getenv("HOME"), RecommendedHomeDir, RecommendedFileName)
+
+	migrationRules := map[string]string{}
+	migrationRules[RecommendedHomeFile] = oldRecommendedHomeFile
+	if goruntime.GOOS == "windows" {
+		migrationRules[RecommendedHomeFile] = oldRecommendedWindowsHomeFile
+	}
+	return migrationRules
+}
+
+type ClientConfigLoader interface {
+	ConfigAccess
+	// IsDefaultConfig returns true if the returned config matches the defaults.
+	IsDefaultConfig(*restclient.Config) bool
+	// Load returns the latest config
+	Load() (*clientcmdapi.Config, error)
+}
+
+type KubeconfigGetter func() (*clientcmdapi.Config, error)
+
+type ClientConfigGetter struct {
+	kubeconfigGetter KubeconfigGetter
+}
+
+// ClientConfigGetter implements the ClientConfigLoader interface.
+var _ ClientConfigLoader = &ClientConfigGetter{}
+
+func (g *ClientConfigGetter) Load() (*clientcmdapi.Config, error) {
+	return g.kubeconfigGetter()
+}
+
+func (g *ClientConfigGetter) GetLoadingPrecedence() []string {
+	return nil
+}
+func (g *ClientConfigGetter) GetStartingConfig() (*clientcmdapi.Config, error) {
+	return g.kubeconfigGetter()
+}
+func (g *ClientConfigGetter) GetDefaultFilename() string {
+	return ""
+}
+func (g *ClientConfigGetter) IsExplicitFile() bool {
+	return false
+}
+func (g *ClientConfigGetter) GetExplicitFile() string {
+	return ""
+}
+func (g *ClientConfigGetter) IsDefaultConfig(config *restclient.Config) bool {
+	return false
+}
+
+// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config
+// Callers can put the chain together however they want, but we'd recommend:
+// EnvVarPathFiles if set (a list of files), OR the HomeDirectoryPath
+// ExplicitPath is special: if a user specifically requests a certain file be used, an error is reported if this file is not present
+type ClientConfigLoadingRules struct {
+	ExplicitPath string
+	Precedence   []string
+
+	// MigrationRules is a map of destination files to source files.  If a destination file is not present, then the source file is checked.
+	// If the source file is present, then it is copied to the destination file BEFORE any further loading happens.
+	MigrationRules map[string]string
+
+	// DoNotResolvePaths indicates whether or not to resolve paths with respect to the originating files.  This is phrased as a negative so
+	// that a default object that doesn't set this will usually get the behavior it wants.
+	DoNotResolvePaths bool
+
+	// DefaultClientConfig is an optional field indicating what rules to use to calculate a default configuration.
+	// This should match the overrides passed in to ClientConfig loader.
+	DefaultClientConfig ClientConfig
+}
+
+// ClientConfigLoadingRules implements the ClientConfigLoader interface.
+var _ ClientConfigLoader = &ClientConfigLoadingRules{}
+
+// NewDefaultClientConfigLoadingRules returns a ClientConfigLoadingRules object with default fields filled in.  You are not required to
+// use this constructor
+func NewDefaultClientConfigLoadingRules() *ClientConfigLoadingRules {
+	chain := []string{}
+
+	envVarFiles := os.Getenv(RecommendedConfigPathEnvVar)
+	if len(envVarFiles) != 0 {
+		fileList := filepath.SplitList(envVarFiles)
+		// prevent the same path from being loaded multiple times
+		chain = append(chain, deduplicate(fileList)...)
+
+	} else {
+		chain = append(chain, RecommendedHomeFile)
+	}
+
+	return &ClientConfigLoadingRules{
+		Precedence:     chain,
+		MigrationRules: currentMigrationRules(),
+	}
+}
+
+// Load starts by running the MigrationRules and then
+// takes the loading rules and returns a Config object based on the following rules.
+//   If the ExplicitPath is set, return the unmerged explicit file.
+//   Otherwise, return a merged config based on the Precedence slice.
+// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
+// Read errors or files with non-deserializable content produce errors.
+// The first file to set a particular map key wins, and that map key's value is never changed.
+// BUT, if you set a struct value that is NOT contained inside a map, the value WILL be changed.
+// This results in some odd looking logic to merge in one direction, merge in the other, and then merge the two.
+// It also means that if two files specify a "red-user", only values from the first file's red-user are used.  Even
+// non-conflicting entries from the second file's "red-user" are discarded.
+// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder
+// and only absolute file paths are returned.
+func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
+	if err := rules.Migrate(); err != nil {
+		return nil, err
+	}
+
+	errlist := []error{}
+
+	kubeConfigFiles := []string{}
+
+	// Make sure a file we were explicitly told to use exists
+	if len(rules.ExplicitPath) > 0 {
+		if _, err := os.Stat(rules.ExplicitPath); os.IsNotExist(err) {
+			return nil, err
+		}
+		kubeConfigFiles = append(kubeConfigFiles, rules.ExplicitPath)
+
+	} else {
+		kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...)
+	}
+
+	kubeconfigs := []*clientcmdapi.Config{}
+	// read and cache the config files so that we only look at them once
+	for _, filename := range kubeConfigFiles {
+		if len(filename) == 0 {
+			// no work to do
+			continue
+		}
+
+		config, err := LoadFromFile(filename)
+		if os.IsNotExist(err) {
+			// skip missing files
+			continue
+		}
+		if err != nil {
+			errlist = append(errlist, fmt.Errorf("Error loading config file \"%s\": %v", filename, err))
+			continue
+		}
+
+		kubeconfigs = append(kubeconfigs, config)
+	}
+
+	// first merge all of our maps
+	mapConfig := clientcmdapi.NewConfig()
+
+	for _, kubeconfig := range kubeconfigs {
+		mergo.Merge(mapConfig, kubeconfig)
+	}
+
+	// merge all of the struct values in the reverse order so that priority is given correctly
+	// errors are not added to the list the second time
+	nonMapConfig := clientcmdapi.NewConfig()
+	for i := len(kubeconfigs) - 1; i >= 0; i-- {
+		kubeconfig := kubeconfigs[i]
+		mergo.Merge(nonMapConfig, kubeconfig)
+	}
+
+	// since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and
+	// get the values we expect.
+	config := clientcmdapi.NewConfig()
+	mergo.Merge(config, mapConfig)
+	mergo.Merge(config, nonMapConfig)
+
+	if rules.ResolvePaths() {
+		if err := ResolveLocalPaths(config); err != nil {
+			errlist = append(errlist, err)
+		}
+	}
+	return config, utilerrors.NewAggregate(errlist)
+}
+
+// Migrate uses the MigrationRules map.  If a destination file is not present, then the source file is checked.
+// If the source file is present, then it is copied to the destination file BEFORE any further loading happens.
+func (rules *ClientConfigLoadingRules) Migrate() error {
+	if rules.MigrationRules == nil {
+		return nil
+	}
+
+	for destination, source := range rules.MigrationRules {
+		if _, err := os.Stat(destination); err == nil {
+			// if the destination already exists, do nothing
+			continue
+		} else if os.IsPermission(err) {
+			// if we can't access the file, skip it
+			continue
+		} else if !os.IsNotExist(err) {
+			// if we had an error other than non-existence, fail
+			return err
+		}
+
+		if sourceInfo, err := os.Stat(source); err != nil {
+			if os.IsNotExist(err) || os.IsPermission(err) {
+				// if the source file doesn't exist or we can't access it, there's no work to do.
+				continue
+			}
+
+			// if we had an error other than non-existence, fail
+			return err
+		} else if sourceInfo.IsDir() {
+			return fmt.Errorf("cannot migrate %v to %v because it is a directory", source, destination)
+		}
+
+		in, err := os.Open(source)
+		if err != nil {
+			return err
+		}
+		defer in.Close()
+		out, err := os.Create(destination)
+		if err != nil {
+			return err
+		}
+		defer out.Close()
+
+		if _, err = io.Copy(out, in); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// GetLoadingPrecedence implements ConfigAccess
+func (rules *ClientConfigLoadingRules) GetLoadingPrecedence() []string {
+	return rules.Precedence
+}
+
+// GetStartingConfig implements ConfigAccess
+func (rules *ClientConfigLoadingRules) GetStartingConfig() (*clientcmdapi.Config, error) {
+	clientConfig := NewNonInteractiveDeferredLoadingClientConfig(rules, &ConfigOverrides{})
+	rawConfig, err := clientConfig.RawConfig()
+	if os.IsNotExist(err) {
+		return clientcmdapi.NewConfig(), nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return &rawConfig, nil
+}
+
+// GetDefaultFilename implements ConfigAccess
+func (rules *ClientConfigLoadingRules) GetDefaultFilename() string {
+	// Explicit file if we have one.
+	if rules.IsExplicitFile() {
+		return rules.GetExplicitFile()
+	}
+	// Otherwise, first existing file from precedence.
+	for _, filename := range rules.GetLoadingPrecedence() {
+		if _, err := os.Stat(filename); err == nil {
+			return filename
+		}
+	}
+	// If none exists, use the first from precedence.
+	if len(rules.Precedence) > 0 {
+		return rules.Precedence[0]
+	}
+	return ""
+}
+
+// IsExplicitFile implements ConfigAccess
+func (rules *ClientConfigLoadingRules) IsExplicitFile() bool {
+	return len(rules.ExplicitPath) > 0
+}
+
+// GetExplicitFile implements ConfigAccess
+func (rules *ClientConfigLoadingRules) GetExplicitFile() string {
+	return rules.ExplicitPath
+}
+
+// IsDefaultConfig returns true if the provided configuration matches the default
+func (rules *ClientConfigLoadingRules) IsDefaultConfig(config *restclient.Config) bool {
+	if rules.DefaultClientConfig == nil {
+		return false
+	}
+	defaultConfig, err := rules.DefaultClientConfig.ClientConfig()
+	if err != nil {
+		return false
+	}
+	return reflect.DeepEqual(config, defaultConfig)
+}
+
+// LoadFromFile takes a filename and deserializes the contents into a Config object
+func LoadFromFile(filename string) (*clientcmdapi.Config, error) {
+	kubeconfigBytes, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	config, err := Load(kubeconfigBytes)
+	if err != nil {
+		return nil, err
+	}
+	glog.V(6).Infoln("Config loaded from file", filename)
+
+	// set LocationOfOrigin on every Cluster, User, and Context
+	for key, obj := range config.AuthInfos {
+		obj.LocationOfOrigin = filename
+		config.AuthInfos[key] = obj
+	}
+	for key, obj := range config.Clusters {
+		obj.LocationOfOrigin = filename
+		config.Clusters[key] = obj
+	}
+	for key, obj := range config.Contexts {
+		obj.LocationOfOrigin = filename
+		config.Contexts[key] = obj
+	}
+
+	if config.AuthInfos == nil {
+		config.AuthInfos = map[string]*clientcmdapi.AuthInfo{}
+	}
+	if config.Clusters == nil {
+		config.Clusters = map[string]*clientcmdapi.Cluster{}
+	}
+	if config.Contexts == nil {
+		config.Contexts = map[string]*clientcmdapi.Context{}
+	}
+
+	return config, nil
+}
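+
+// Example (an illustrative sketch; the kubeconfig path is an assumption for
+// the example):
+//
+//	config, err := LoadFromFile("/home/user/.kube/config")
+//	if err != nil {
+//		// handle error
+//	}
+//	for name := range config.Contexts {
+//		fmt.Println(name)
+//	}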
+
+// Load takes a byte slice and deserializes the contents into a Config object.
+// Encapsulates deserialization without assuming the source is a file.
+func Load(data []byte) (*clientcmdapi.Config, error) {
+	config := clientcmdapi.NewConfig()
+	// if there's no data in a file, return the default object instead of failing (DecodeInto rejects empty input)
+	if len(data) == 0 {
+		return config, nil
+	}
+	decoded, _, err := clientcmdlatest.Codec.Decode(data, &schema.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"}, config)
+	if err != nil {
+		return nil, err
+	}
+	return decoded.(*clientcmdapi.Config), nil
+}
+
+// WriteToFile serializes the config to yaml and writes it out to a file.  If the file does not exist,
+// it is created with mode 0600; if it does exist, its contents are overwritten.
+func WriteToFile(config clientcmdapi.Config, filename string) error {
+	content, err := Write(config)
+	if err != nil {
+		return err
+	}
+	dir := filepath.Dir(filename)
+	if _, err := os.Stat(dir); os.IsNotExist(err) {
+		if err = os.MkdirAll(dir, 0755); err != nil {
+			return err
+		}
+	}
+
+	if err := ioutil.WriteFile(filename, content, 0600); err != nil {
+		return err
+	}
+	return nil
+}
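+
+// Example (an illustrative sketch; the filenames and context name are
+// assumptions for the example):
+//
+//	config, err := LoadFromFile("/home/user/.kube/config")
+//	if err != nil {
+//		// handle error
+//	}
+//	config.CurrentContext = "staging"
+//	if err := WriteToFile(*config, "/home/user/.kube/config.new"); err != nil {
+//		// handle error
+//	}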
+
+func lockFile(filename string) error {
+	// TODO: find a way to do this with actual file locks. Will
+	// probably need a separate solution for Windows and Linux.
+
+	// Make sure the dir exists before we try to create a lock file.
+	dir := filepath.Dir(filename)
+	if _, err := os.Stat(dir); os.IsNotExist(err) {
+		if err = os.MkdirAll(dir, 0755); err != nil {
+			return err
+		}
+	}
+	f, err := os.OpenFile(lockName(filename), os.O_CREATE|os.O_EXCL, 0)
+	if err != nil {
+		return err
+	}
+	f.Close()
+	return nil
+}
+
+func unlockFile(filename string) error {
+	return os.Remove(lockName(filename))
+}
+
+func lockName(filename string) string {
+	return filename + ".lock"
+}
+
+// Write serializes the config to yaml.
+// Encapsulates serialization without assuming the destination is a file.
+func Write(config clientcmdapi.Config) ([]byte, error) {
+	return runtime.Encode(clientcmdlatest.Codec, &config)
+}
+
+func (rules ClientConfigLoadingRules) ResolvePaths() bool {
+	return !rules.DoNotResolvePaths
+}
+
+// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin.
+// This cannot be done directly inside LoadFromFile because doing so would make it impossible to load a file
+// without modifying its contents.
+func ResolveLocalPaths(config *clientcmdapi.Config) error {
+	for _, cluster := range config.Clusters {
+		if len(cluster.LocationOfOrigin) == 0 {
+			continue
+		}
+		base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
+		if err != nil {
+			return fmt.Errorf("Could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err)
+		}
+
+		if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil {
+			return err
+		}
+	}
+	for _, authInfo := range config.AuthInfos {
+		if len(authInfo.LocationOfOrigin) == 0 {
+			continue
+		}
+		base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
+		if err != nil {
+			return fmt.Errorf("Could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err)
+		}
+
+		if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// RelativizeClusterLocalPaths first absolutizes the paths by calling ResolvePaths.  This assumes that any NEW path is already
+// absolute, but any existing path will be resolved relative to LocationOfOrigin before being relativized.
+func RelativizeClusterLocalPaths(cluster *clientcmdapi.Cluster) error {
+	if len(cluster.LocationOfOrigin) == 0 {
+		return fmt.Errorf("no location of origin for %s", cluster.Server)
+	}
+	base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
+	if err != nil {
+		return fmt.Errorf("could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err)
+	}
+
+	if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil {
+		return err
+	}
+	if err := RelativizePathWithNoBacksteps(GetClusterFileReferences(cluster), base); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// RelativizeAuthInfoLocalPaths first absolutizes the paths by calling ResolvePaths.  This assumes that any NEW path is already
+// absolute, but any existing path will be resolved relative to LocationOfOrigin before being relativized.
+func RelativizeAuthInfoLocalPaths(authInfo *clientcmdapi.AuthInfo) error {
+	if len(authInfo.LocationOfOrigin) == 0 {
+		return fmt.Errorf("no location of origin for %v", authInfo)
+	}
+	base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
+	if err != nil {
+		return fmt.Errorf("could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err)
+	}
+
+	if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil {
+		return err
+	}
+	if err := RelativizePathWithNoBacksteps(GetAuthInfoFileReferences(authInfo), base); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func RelativizeConfigPaths(config *clientcmdapi.Config, base string) error {
+	return RelativizePathWithNoBacksteps(GetConfigFileReferences(config), base)
+}
+
+func ResolveConfigPaths(config *clientcmdapi.Config, base string) error {
+	return ResolvePaths(GetConfigFileReferences(config), base)
+}
+
+func GetConfigFileReferences(config *clientcmdapi.Config) []*string {
+	refs := []*string{}
+
+	for _, cluster := range config.Clusters {
+		refs = append(refs, GetClusterFileReferences(cluster)...)
+	}
+	for _, authInfo := range config.AuthInfos {
+		refs = append(refs, GetAuthInfoFileReferences(authInfo)...)
+	}
+
+	return refs
+}
+
+func GetClusterFileReferences(cluster *clientcmdapi.Cluster) []*string {
+	return []*string{&cluster.CertificateAuthority}
+}
+
+func GetAuthInfoFileReferences(authInfo *clientcmdapi.AuthInfo) []*string {
+	s := []*string{&authInfo.ClientCertificate, &authInfo.ClientKey, &authInfo.TokenFile}
+	// Only resolve exec command if it isn't PATH based.
+	if authInfo.Exec != nil && strings.ContainsRune(authInfo.Exec.Command, filepath.Separator) {
+		s = append(s, &authInfo.Exec.Command)
+	}
+	return s
+}
+
+// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory
+func ResolvePaths(refs []*string, base string) error {
+	for _, ref := range refs {
+		// Don't resolve empty paths
+		if len(*ref) > 0 {
+			// Don't resolve absolute paths
+			if !filepath.IsAbs(*ref) {
+				*ref = filepath.Join(base, *ref)
+			}
+		}
+	}
+	return nil
+}
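+
+// For example (illustrative):
+//
+//	ca := "ca.crt"
+//	_ = ResolvePaths([]*string{&ca}, "/home/user/.kube")
+//	// ca is now "/home/user/.kube/ca.crt"; an absolute ref would be left as-is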
+
+// RelativizePathWithNoBacksteps updates the given refs to be relative paths, relative to the given base directory as long as they do not require backsteps.
+// Any path requiring a backstep is left as-is, as long as it is absolute.  Any non-absolute path that can't be relativized produces an error.
+func RelativizePathWithNoBacksteps(refs []*string, base string) error {
+	for _, ref := range refs {
+		// Don't relativize empty paths
+		if len(*ref) > 0 {
+			rel, err := MakeRelative(*ref, base)
+			if err != nil {
+				return err
+			}
+
+			// if we have a backstep, don't mess with the path
+			if strings.HasPrefix(rel, "../") {
+				if filepath.IsAbs(*ref) {
+					continue
+				}
+
+				return fmt.Errorf("%v requires backsteps and is not absolute", *ref)
+			}
+
+			*ref = rel
+		}
+	}
+	return nil
+}
+
+func MakeRelative(path, base string) (string, error) {
+	if len(path) > 0 {
+		rel, err := filepath.Rel(base, path)
+		if err != nil {
+			return path, err
+		}
+		return rel, nil
+	}
+	return path, nil
+}
+
+// deduplicate removes any duplicated values and returns a new slice, keeping the order unchanged
+func deduplicate(s []string) []string {
+	encountered := map[string]bool{}
+	ret := make([]string, 0)
+	for i := range s {
+		if encountered[s[i]] {
+			continue
+		}
+		encountered[s[i]] = true
+		ret = append(ret, s[i])
+	}
+	return ret
+}
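+
+// For example (illustrative): deduplicate([]string{"a", "b", "a"}) returns
+// []string{"a", "b"}.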
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go
new file mode 100644
index 0000000..0503813
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go
@@ -0,0 +1,168 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"io"
+	"sync"
+
+	"github.com/golang/glog"
+
+	restclient "k8s.io/client-go/rest"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+// DeferredLoadingClientConfig is a ClientConfig implementation that is backed by a client config loader.
+// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that
+// the most recent rules are used.  This is useful in cases where you bind flags to loading rule parameters before
+// the parse happens, and you want your calling code to be ignorant of how the values are being mutated, to avoid
+// passing extraneous information down a call stack.
+type DeferredLoadingClientConfig struct {
+	loader         ClientConfigLoader
+	overrides      *ConfigOverrides
+	fallbackReader io.Reader
+
+	clientConfig ClientConfig
+	loadingLock  sync.Mutex
+
+	// provided for testing
+	icc InClusterConfig
+}
+
+// InClusterConfig abstracts the details of whether the client is running in a cluster, so they can be faked for testing.
+type InClusterConfig interface {
+	ClientConfig
+	Possible() bool
+}
+
+// NewNonInteractiveDeferredLoadingClientConfig creates a DeferredLoadingClientConfig using the passed loader and overrides
+func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig {
+	return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}}
+}
+
+// NewInteractiveDeferredLoadingClientConfig creates a DeferredLoadingClientConfig using the passed loader, overrides, and fallback auth reader
+func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig {
+	return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}, fallbackReader: fallbackReader}
+}
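+
+// Example (an illustrative sketch from a caller's point of view, assuming this
+// package is imported as clientcmd):
+//
+//	loader := clientcmd.NewDefaultClientConfigLoadingRules()
+//	deferred := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loader, &clientcmd.ConfigOverrides{})
+//	restConfig, err := deferred.ClientConfig()
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = restConfig // e.g. pass to kubernetes.NewForConfig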
+
+func (config *DeferredLoadingClientConfig) createClientConfig() (ClientConfig, error) {
+	if config.clientConfig == nil {
+		config.loadingLock.Lock()
+		defer config.loadingLock.Unlock()
+
+		if config.clientConfig == nil {
+			mergedConfig, err := config.loader.Load()
+			if err != nil {
+				return nil, err
+			}
+
+			var mergedClientConfig ClientConfig
+			if config.fallbackReader != nil {
+				mergedClientConfig = NewInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.fallbackReader, config.loader)
+			} else {
+				mergedClientConfig = NewNonInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.loader)
+			}
+
+			config.clientConfig = mergedClientConfig
+		}
+	}
+
+	return config.clientConfig, nil
+}
+
+func (config *DeferredLoadingClientConfig) RawConfig() (clientcmdapi.Config, error) {
+	mergedConfig, err := config.createClientConfig()
+	if err != nil {
+		return clientcmdapi.Config{}, err
+	}
+
+	return mergedConfig.RawConfig()
+}
+
+// ClientConfig implements ClientConfig
+func (config *DeferredLoadingClientConfig) ClientConfig() (*restclient.Config, error) {
+	mergedClientConfig, err := config.createClientConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	// load the configuration and return on non-empty errors and if the
+	// content differs from the default config
+	mergedConfig, err := mergedClientConfig.ClientConfig()
+	switch {
+	case err != nil:
+		if !IsEmptyConfig(err) {
+			// return on any error except empty config
+			return nil, err
+		}
+	case mergedConfig != nil:
+		// the configuration is valid, but if this is equal to the defaults we should try
+		// in-cluster configuration
+		if !config.loader.IsDefaultConfig(mergedConfig) {
+			return mergedConfig, nil
+		}
+	}
+
+	// check for in-cluster configuration and use it
+	if config.icc.Possible() {
+		glog.V(4).Infof("Using in-cluster configuration")
+		return config.icc.ClientConfig()
+	}
+
+	// return the result of the merged client config
+	return mergedConfig, err
+}
+
+// Namespace implements ClientConfig
+func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) {
+	mergedKubeConfig, err := config.createClientConfig()
+	if err != nil {
+		return "", false, err
+	}
+
+	ns, overridden, err := mergedKubeConfig.Namespace()
+	// if we get an error and it is not empty config, or if the merged config defined an explicit namespace, or
+	// if in-cluster config is not possible, return immediately
+	if (err != nil && !IsEmptyConfig(err)) || overridden || !config.icc.Possible() {
+		// return on any error except empty config
+		return ns, overridden, err
+	}
+
+	if len(ns) > 0 {
+		// if we got a non-default namespace from the kubeconfig, use it
+		if ns != "default" {
+			return ns, false, nil
+		}
+
+		// if we got a default namespace, determine whether it was explicit or implicit
+		if raw, err := mergedKubeConfig.RawConfig(); err == nil {
+			if context := raw.Contexts[raw.CurrentContext]; context != nil && len(context.Namespace) > 0 {
+				return ns, false, nil
+			}
+		}
+	}
+
+	glog.V(4).Infof("Using in-cluster namespace")
+
+	// allow the namespace from the service account token directory to be used.
+	return config.icc.Namespace()
+}
+
+// ConfigAccess implements ClientConfig
+func (config *DeferredLoadingClientConfig) ConfigAccess() ConfigAccess {
+	return config.loader
+}
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/overrides.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/overrides.go
new file mode 100644
index 0000000..bfca032
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/overrides.go
@@ -0,0 +1,247 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"strconv"
+	"strings"
+
+	"github.com/spf13/pflag"
+
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+// ConfigOverrides holds values that should override whatever information is pulled from the actual Config object.  You can't
+// simply use an actual Config object, because Configs hold maps, but overrides are restricted to "at most one"
+type ConfigOverrides struct {
+	AuthInfo clientcmdapi.AuthInfo
+	// ClusterDefaults are applied before the configured cluster info is loaded.
+	ClusterDefaults clientcmdapi.Cluster
+	ClusterInfo     clientcmdapi.Cluster
+	Context         clientcmdapi.Context
+	CurrentContext  string
+	Timeout         string
+}
+
+// ConfigOverrideFlags holds the flag names to be used for binding command line flags. Notice that this structure tightly
+// corresponds to ConfigOverrides
+type ConfigOverrideFlags struct {
+	AuthOverrideFlags    AuthOverrideFlags
+	ClusterOverrideFlags ClusterOverrideFlags
+	ContextOverrideFlags ContextOverrideFlags
+	CurrentContext       FlagInfo
+	Timeout              FlagInfo
+}
+
+// AuthOverrideFlags holds the flag names to be used for binding command line flags for AuthInfo objects
+type AuthOverrideFlags struct {
+	ClientCertificate FlagInfo
+	ClientKey         FlagInfo
+	Token             FlagInfo
+	Impersonate       FlagInfo
+	ImpersonateGroups FlagInfo
+	Username          FlagInfo
+	Password          FlagInfo
+}
+
+// ContextOverrideFlags holds the flag names to be used for binding command line flags for Context objects
+type ContextOverrideFlags struct {
+	ClusterName  FlagInfo
+	AuthInfoName FlagInfo
+	Namespace    FlagInfo
+}
+
+// ClusterOverrideFlags holds the flag names to be used for binding command line flags for Cluster objects
+type ClusterOverrideFlags struct {
+	APIServer             FlagInfo
+	APIVersion            FlagInfo
+	CertificateAuthority  FlagInfo
+	InsecureSkipTLSVerify FlagInfo
+}
+
+// FlagInfo contains information about how to register a flag.  This struct is useful if you want to provide a way for an extender to
+// get back a set of recommended flag names, descriptions, and defaults, while still allowing for customization.  This makes for
+// coherent extension without full prescription.
+type FlagInfo struct {
+	// LongName is the long string for a flag.  If this is empty, then the flag will not be bound
+	LongName string
+	// ShortName is the single character for a flag.  If this is empty, then there will be no short flag
+	ShortName string
+	// Default is the default value for the flag
+	Default string
+	// Description is the description for the flag
+	Description string
+}
+
+// AddSecretAnnotation marks the flag as secret by adding a "classified" annotation to it.
+func (f FlagInfo) AddSecretAnnotation(flags *pflag.FlagSet) FlagInfo {
+	flags.SetAnnotation(f.LongName, "classified", []string{"true"})
+	return f
+}
+
+// BindStringFlag binds the flag based on the provided info.  If LongName == "", nothing is registered
+func (f FlagInfo) BindStringFlag(flags *pflag.FlagSet, target *string) FlagInfo {
+	// you can't register a flag without a long name
+	if len(f.LongName) > 0 {
+		flags.StringVarP(target, f.LongName, f.ShortName, f.Default, f.Description)
+	}
+	return f
+}
+
+// BindTransformingStringFlag binds the flag based on the provided info.  If LongName == "", nothing is registered
+func (f FlagInfo) BindTransformingStringFlag(flags *pflag.FlagSet, target *string, transformer func(string) (string, error)) FlagInfo {
+	// you can't register a flag without a long name
+	if len(f.LongName) > 0 {
+		flags.VarP(newTransformingStringValue(f.Default, target, transformer), f.LongName, f.ShortName, f.Description)
+	}
+	return f
+}
+
+// BindStringArrayFlag binds the flag based on the provided info.  If LongName == "", nothing is registered
+func (f FlagInfo) BindStringArrayFlag(flags *pflag.FlagSet, target *[]string) FlagInfo {
+	// you can't register a flag without a long name
+	if len(f.LongName) > 0 {
+		sliceVal := []string{}
+		if len(f.Default) > 0 {
+			sliceVal = []string{f.Default}
+		}
+		flags.StringArrayVarP(target, f.LongName, f.ShortName, sliceVal, f.Description)
+	}
+	return f
+}
+
+// BindBoolFlag binds the flag based on the provided info.  If LongName == "", nothing is registered
+func (f FlagInfo) BindBoolFlag(flags *pflag.FlagSet, target *bool) FlagInfo {
+	// you can't register a flag without a long name
+	if len(f.LongName) > 0 {
+		// try to parse Default as a bool.  If it fails, assume false
+		boolVal, err := strconv.ParseBool(f.Default)
+		if err != nil {
+			boolVal = false
+		}
+
+		flags.BoolVarP(target, f.LongName, f.ShortName, boolVal, f.Description)
+	}
+	return f
+}
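+
+// Example (an illustrative sketch; flags is an assumed *pflag.FlagSet and the
+// flag name is an assumption for the example):
+//
+//	var kubeconfig string
+//	fi := FlagInfo{LongName: "kubeconfig", Description: "Path to the kubeconfig file"}
+//	fi.BindStringFlag(flags, &kubeconfig)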
+
+const (
+	FlagClusterName      = "cluster"
+	FlagAuthInfoName     = "user"
+	FlagContext          = "context"
+	FlagNamespace        = "namespace"
+	FlagAPIServer        = "server"
+	FlagInsecure         = "insecure-skip-tls-verify"
+	FlagCertFile         = "client-certificate"
+	FlagKeyFile          = "client-key"
+	FlagCAFile           = "certificate-authority"
+	FlagEmbedCerts       = "embed-certs"
+	FlagBearerToken      = "token"
+	FlagImpersonate      = "as"
+	FlagImpersonateGroup = "as-group"
+	FlagUsername         = "username"
+	FlagPassword         = "password"
+	FlagTimeout          = "request-timeout"
+)
+
+// RecommendedConfigOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
+func RecommendedConfigOverrideFlags(prefix string) ConfigOverrideFlags {
+	return ConfigOverrideFlags{
+		AuthOverrideFlags:    RecommendedAuthOverrideFlags(prefix),
+		ClusterOverrideFlags: RecommendedClusterOverrideFlags(prefix),
+		ContextOverrideFlags: RecommendedContextOverrideFlags(prefix),
+
+		CurrentContext: FlagInfo{prefix + FlagContext, "", "", "The name of the kubeconfig context to use"},
+		Timeout:        FlagInfo{prefix + FlagTimeout, "", "0", "The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests."},
+	}
+}
+
+// RecommendedAuthOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
+func RecommendedAuthOverrideFlags(prefix string) AuthOverrideFlags {
+	return AuthOverrideFlags{
+		ClientCertificate: FlagInfo{prefix + FlagCertFile, "", "", "Path to a client certificate file for TLS"},
+		ClientKey:         FlagInfo{prefix + FlagKeyFile, "", "", "Path to a client key file for TLS"},
+		Token:             FlagInfo{prefix + FlagBearerToken, "", "", "Bearer token for authentication to the API server"},
+		Impersonate:       FlagInfo{prefix + FlagImpersonate, "", "", "Username to impersonate for the operation"},
+		ImpersonateGroups: FlagInfo{prefix + FlagImpersonateGroup, "", "", "Group to impersonate for the operation; this flag can be repeated to specify multiple groups."},
+		Username:          FlagInfo{prefix + FlagUsername, "", "", "Username for basic authentication to the API server"},
+		Password:          FlagInfo{prefix + FlagPassword, "", "", "Password for basic authentication to the API server"},
+	}
+}
+
+// RecommendedClusterOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
+func RecommendedClusterOverrideFlags(prefix string) ClusterOverrideFlags {
+	return ClusterOverrideFlags{
+		APIServer:             FlagInfo{prefix + FlagAPIServer, "", "", "The address and port of the Kubernetes API server"},
+		CertificateAuthority:  FlagInfo{prefix + FlagCAFile, "", "", "Path to a cert file for the certificate authority"},
+		InsecureSkipTLSVerify: FlagInfo{prefix + FlagInsecure, "", "false", "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure"},
+	}
+}
+
+// RecommendedContextOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
+func RecommendedContextOverrideFlags(prefix string) ContextOverrideFlags {
+	return ContextOverrideFlags{
+		ClusterName:  FlagInfo{prefix + FlagClusterName, "", "", "The name of the kubeconfig cluster to use"},
+		AuthInfoName: FlagInfo{prefix + FlagAuthInfoName, "", "", "The name of the kubeconfig user to use"},
+		Namespace:    FlagInfo{prefix + FlagNamespace, "n", "", "If present, the namespace scope for this CLI request"},
+	}
+}
+
+// BindOverrideFlags is a convenience method to bind the specified flags to their associated variables
+func BindOverrideFlags(overrides *ConfigOverrides, flags *pflag.FlagSet, flagNames ConfigOverrideFlags) {
+	BindAuthInfoFlags(&overrides.AuthInfo, flags, flagNames.AuthOverrideFlags)
+	BindClusterFlags(&overrides.ClusterInfo, flags, flagNames.ClusterOverrideFlags)
+	BindContextFlags(&overrides.Context, flags, flagNames.ContextOverrideFlags)
+	flagNames.CurrentContext.BindStringFlag(flags, &overrides.CurrentContext)
+	flagNames.Timeout.BindStringFlag(flags, &overrides.Timeout)
+}
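+
+// Example (an illustrative sketch, assuming this package is imported as
+// clientcmd; the flag set name is an assumption for the example):
+//
+//	flags := pflag.NewFlagSet("example", pflag.ContinueOnError)
+//	overrides := &clientcmd.ConfigOverrides{}
+//	clientcmd.BindOverrideFlags(overrides, flags, clientcmd.RecommendedConfigOverrideFlags(""))
+//	_ = flags.Parse(os.Args[1:])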
+
+// BindAuthInfoFlags is a convenience method to bind the specified flags to their associated variables
+func BindAuthInfoFlags(authInfo *clientcmdapi.AuthInfo, flags *pflag.FlagSet, flagNames AuthOverrideFlags) {
+	flagNames.ClientCertificate.BindStringFlag(flags, &authInfo.ClientCertificate).AddSecretAnnotation(flags)
+	flagNames.ClientKey.BindStringFlag(flags, &authInfo.ClientKey).AddSecretAnnotation(flags)
+	flagNames.Token.BindStringFlag(flags, &authInfo.Token).AddSecretAnnotation(flags)
+	flagNames.Impersonate.BindStringFlag(flags, &authInfo.Impersonate).AddSecretAnnotation(flags)
+	flagNames.ImpersonateGroups.BindStringArrayFlag(flags, &authInfo.ImpersonateGroups).AddSecretAnnotation(flags)
+	flagNames.Username.BindStringFlag(flags, &authInfo.Username).AddSecretAnnotation(flags)
+	flagNames.Password.BindStringFlag(flags, &authInfo.Password).AddSecretAnnotation(flags)
+}
+
+// BindClusterFlags is a convenience method to bind the specified flags to their associated variables
+func BindClusterFlags(clusterInfo *clientcmdapi.Cluster, flags *pflag.FlagSet, flagNames ClusterOverrideFlags) {
+	flagNames.APIServer.BindStringFlag(flags, &clusterInfo.Server)
+	flagNames.CertificateAuthority.BindStringFlag(flags, &clusterInfo.CertificateAuthority)
+	flagNames.InsecureSkipTLSVerify.BindBoolFlag(flags, &clusterInfo.InsecureSkipTLSVerify)
+}
+
+// BindContextFlags is a convenience method to bind the specified flags to their associated variables
+func BindContextFlags(contextInfo *clientcmdapi.Context, flags *pflag.FlagSet, flagNames ContextOverrideFlags) {
+	flagNames.ClusterName.BindStringFlag(flags, &contextInfo.Cluster)
+	flagNames.AuthInfoName.BindStringFlag(flags, &contextInfo.AuthInfo)
+	flagNames.Namespace.BindTransformingStringFlag(flags, &contextInfo.Namespace, RemoveNamespacesPrefix)
+}
+
+// RemoveNamespacesPrefix is a transformer that strips "ns/", "namespace/" and "namespaces/" prefixes case-insensitively
+func RemoveNamespacesPrefix(value string) (string, error) {
+	for _, prefix := range []string{"namespaces/", "namespace/", "ns/"} {
+		if len(value) > len(prefix) && strings.EqualFold(value[0:len(prefix)], prefix) {
+			value = value[len(prefix):]
+			break
+		}
+	}
+	return value, nil
+}
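+
+// For example (illustrative): RemoveNamespacesPrefix("ns/kube-system") returns
+// "kube-system", while RemoveNamespacesPrefix("kube-system") is returned
+// unchanged.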
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/validation.go b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/validation.go
new file mode 100644
index 0000000..629c0b3
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/clientcmd/validation.go
@@ -0,0 +1,298 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"reflect"
+	"strings"
+
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/validation"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+var (
+	ErrNoContext   = errors.New("no context chosen")
+	ErrEmptyConfig = errors.New("no configuration has been provided")
+	// message is for consistency with old behavior
+	ErrEmptyCluster = errors.New("cluster has no server defined")
+)
+
+type errContextNotFound struct {
+	ContextName string
+}
+
+func (e *errContextNotFound) Error() string {
+	return fmt.Sprintf("context was not found for specified context: %v", e.ContextName)
+}
+
+// IsContextNotFound returns a boolean indicating whether the error is known to
+// report that a context was not found
+func IsContextNotFound(err error) bool {
+	if err == nil {
+		return false
+	}
+	if _, ok := err.(*errContextNotFound); ok || err == ErrNoContext {
+		return true
+	}
+	return strings.Contains(err.Error(), "context was not found for specified context")
+}
+
+// IsEmptyConfig returns true if the provided error indicates the provided configuration
+// is empty.
+func IsEmptyConfig(err error) bool {
+	switch t := err.(type) {
+	case errConfigurationInvalid:
+		return len(t) == 1 && t[0] == ErrEmptyConfig
+	}
+	return err == ErrEmptyConfig
+}
+
+// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
+type errConfigurationInvalid []error
+
+// errConfigurationInvalid implements error and Aggregate
+var _ error = errConfigurationInvalid{}
+var _ utilerrors.Aggregate = errConfigurationInvalid{}
+
+func newErrConfigurationInvalid(errs []error) error {
+	switch len(errs) {
+	case 0:
+		return nil
+	default:
+		return errConfigurationInvalid(errs)
+	}
+}
+
+// Error implements the error interface
+func (e errConfigurationInvalid) Error() string {
+	return fmt.Sprintf("invalid configuration: %v", utilerrors.NewAggregate(e).Error())
+}
+
+// Errors implements the AggregateError interface
+func (e errConfigurationInvalid) Errors() []error {
+	return e
+}
+
+// IsConfigurationInvalid returns true if the provided error indicates the configuration is invalid.
+func IsConfigurationInvalid(err error) bool {
+	switch err.(type) {
+	case *errContextNotFound, errConfigurationInvalid:
+		return true
+	}
+	return IsContextNotFound(err)
+}
+
+// Validate checks for errors in the Config.  It does not return early so that it can find as many errors as possible.
+func Validate(config clientcmdapi.Config) error {
+	validationErrors := make([]error, 0)
+
+	if clientcmdapi.IsConfigEmpty(&config) {
+		return newErrConfigurationInvalid([]error{ErrEmptyConfig})
+	}
+
+	if len(config.CurrentContext) != 0 {
+		if _, exists := config.Contexts[config.CurrentContext]; !exists {
+			validationErrors = append(validationErrors, &errContextNotFound{config.CurrentContext})
+		}
+	}
+
+	for contextName, context := range config.Contexts {
+		validationErrors = append(validationErrors, validateContext(contextName, *context, config)...)
+	}
+
+	for authInfoName, authInfo := range config.AuthInfos {
+		validationErrors = append(validationErrors, validateAuthInfo(authInfoName, *authInfo)...)
+	}
+
+	for clusterName, clusterInfo := range config.Clusters {
+		validationErrors = append(validationErrors, validateClusterInfo(clusterName, *clusterInfo)...)
+	}
+
+	return newErrConfigurationInvalid(validationErrors)
+}
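+
+// Example (an illustrative sketch; the kubeconfig path is an assumption for
+// the example):
+//
+//	config, err := LoadFromFile("/home/user/.kube/config")
+//	if err != nil {
+//		// handle error
+//	}
+//	if err := Validate(*config); err != nil {
+//		// the aggregate error lists every problem found
+//	}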
+
+// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable.  There might still be errors in the config,
+// but no errors in the sections requested or referenced.  It does not return early so that it can find as many errors as possible.
+func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error {
+	validationErrors := make([]error, 0)
+
+	if clientcmdapi.IsConfigEmpty(&config) {
+		return newErrConfigurationInvalid([]error{ErrEmptyConfig})
+	}
+
+	var contextName string
+	if len(passedContextName) != 0 {
+		contextName = passedContextName
+	} else {
+		contextName = config.CurrentContext
+	}
+
+	if len(contextName) == 0 {
+		return ErrNoContext
+	}
+
+	context, exists := config.Contexts[contextName]
+	if !exists {
+		validationErrors = append(validationErrors, &errContextNotFound{contextName})
+	}
+
+	if exists {
+		validationErrors = append(validationErrors, validateContext(contextName, *context, config)...)
+		// guard the lookups so a missing user or cluster doesn't panic on a nil dereference
+		if authInfo, ok := config.AuthInfos[context.AuthInfo]; ok && authInfo != nil {
+			validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *authInfo)...)
+		}
+		if cluster, ok := config.Clusters[context.Cluster]; ok && cluster != nil {
+			validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *cluster)...)
+		}
+	}
+
+	return newErrConfigurationInvalid(validationErrors)
+}
+
+// validateClusterInfo looks for conflicts and errors in the cluster info
+func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) []error {
+	validationErrors := make([]error, 0)
+
+	emptyCluster := clientcmdapi.NewCluster()
+	if reflect.DeepEqual(*emptyCluster, clusterInfo) {
+		return []error{ErrEmptyCluster}
+	}
+
+	if len(clusterInfo.Server) == 0 {
+		if len(clusterName) == 0 {
+			validationErrors = append(validationErrors, fmt.Errorf("default cluster has no server defined"))
+		} else {
+			validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName))
+		}
+	}
+	// Make sure CA data and CA file aren't both specified
+	if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 {
+		validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override.", clusterName))
+	}
+	if len(clusterInfo.CertificateAuthority) != 0 {
+		clientCertCA, err := os.Open(clusterInfo.CertificateAuthority)
+		if err != nil {
+			validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
+		} else {
+			clientCertCA.Close()
+		}
+	}
+
+	return validationErrors
+}
+
+// validateAuthInfo looks for conflicts and errors in the auth info
+func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []error {
+	validationErrors := make([]error, 0)
+
+	usingAuthPath := false
+	methods := make([]string, 0, 3)
+	if len(authInfo.Token) != 0 {
+		methods = append(methods, "token")
+	}
+	if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 {
+		methods = append(methods, "basicAuth")
+	}
+
+	if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 {
+		// Make sure cert data and file aren't both specified
+		if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 {
+			validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override.", authInfoName))
+		}
+		// Make sure key data and file aren't both specified
+		if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 {
+			validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
+		}
+		// Make sure a key is specified
+		if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 {
+			validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method.", authInfoName))
+		}
+
+		if len(authInfo.ClientCertificate) != 0 {
+			clientCertFile, err := os.Open(authInfo.ClientCertificate)
+			if err != nil {
+				validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
+			} else {
+				clientCertFile.Close()
+			}
+		}
+		if len(authInfo.ClientKey) != 0 {
+			clientKeyFile, err := os.Open(authInfo.ClientKey)
+			if err != nil {
+				validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
+			} else {
+				clientKeyFile.Close()
+			}
+		}
+	}
+
+	if authInfo.Exec != nil {
+		if authInfo.AuthProvider != nil {
+			validationErrors = append(validationErrors, fmt.Errorf("authProvider cannot be provided in combination with an exec plugin for %s", authInfoName))
+		}
+		if len(authInfo.Exec.Command) == 0 {
+			validationErrors = append(validationErrors, fmt.Errorf("command must be specified for %v to use exec authentication plugin", authInfoName))
+		}
+		if len(authInfo.Exec.APIVersion) == 0 {
+			validationErrors = append(validationErrors, fmt.Errorf("apiVersion must be specified for %v to use exec authentication plugin", authInfoName))
+		}
+		for _, v := range authInfo.Exec.Env {
+			if len(v.Name) == 0 {
+				validationErrors = append(validationErrors, fmt.Errorf("env variable name must be specified for %v to use exec authentication plugin", authInfoName))
+			} else if len(v.Value) == 0 {
+				validationErrors = append(validationErrors, fmt.Errorf("env variable %s value must be specified for %v to use exec authentication plugin", v.Name, authInfoName))
+			}
+		}
+	}
+
+	// authPath also provides information for the client to identify the server, so allow multiple auth methods in that case
+	if (len(methods) > 1) && (!usingAuthPath) {
+		validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
+	}
+
+	// ImpersonateGroups or ImpersonateUserExtra should be requested with a user
+	if (len(authInfo.ImpersonateGroups) > 0 || len(authInfo.ImpersonateUserExtra) > 0) && (len(authInfo.Impersonate) == 0) {
+		validationErrors = append(validationErrors, fmt.Errorf("requesting groups or user-extra for %v without impersonating a user", authInfoName))
+	}
+	return validationErrors
+}
+
+// validateContext looks for errors in the context.  It is not transitive, so errors in the referenced authInfo or cluster configs are not included in this return
+func validateContext(contextName string, context clientcmdapi.Context, config clientcmdapi.Config) []error {
+	validationErrors := make([]error, 0)
+
+	if len(contextName) == 0 {
+		validationErrors = append(validationErrors, fmt.Errorf("empty context name for %#v is not allowed", context))
+	}
+
+	if len(context.AuthInfo) == 0 {
+		validationErrors = append(validationErrors, fmt.Errorf("user was not specified for context %q", contextName))
+	} else if _, exists := config.AuthInfos[context.AuthInfo]; !exists {
+		validationErrors = append(validationErrors, fmt.Errorf("user %q was not found for context %q", context.AuthInfo, contextName))
+	}
+
+	if len(context.Cluster) == 0 {
+		validationErrors = append(validationErrors, fmt.Errorf("cluster was not specified for context %q", contextName))
+	} else if _, exists := config.Clusters[context.Cluster]; !exists {
+		validationErrors = append(validationErrors, fmt.Errorf("cluster %q was not found for context %q", context.Cluster, contextName))
+	}
+
+	if len(context.Namespace) != 0 {
+		if len(validation.IsDNS1123Label(context.Namespace)) != 0 {
+			validationErrors = append(validationErrors, fmt.Errorf("namespace %q for context %q does not conform to the kubernetes DNS_LABEL rules", context.Namespace, contextName))
+		}
+	}
+
+	return validationErrors
+}
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/metrics/OWNERS b/metrics-server/vendor/k8s.io/client-go/tools/metrics/OWNERS
new file mode 100755
index 0000000..ff51798
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/metrics/OWNERS
@@ -0,0 +1,7 @@
+reviewers:
+- wojtek-t
+- eparis
+- krousey
+- jayunit100
+- fgrzadkowski
+- tmrts
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/metrics/metrics.go b/metrics-server/vendor/k8s.io/client-go/tools/metrics/metrics.go
new file mode 100644
index 0000000..a01306c
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/metrics/metrics.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package metrics provides abstractions for registering which metrics
+// to record.
+package metrics
+
+import (
+	"net/url"
+	"sync"
+	"time"
+)
+
+var registerMetrics sync.Once
+
+// LatencyMetric observes client latency partitioned by verb and url.
+type LatencyMetric interface {
+	Observe(verb string, u url.URL, latency time.Duration)
+}
+
+// ResultMetric counts response codes partitioned by method and host.
+type ResultMetric interface {
+	Increment(code string, method string, host string)
+}
+
+var (
+	// RequestLatency is the latency metric that rest clients will update.
+	RequestLatency LatencyMetric = noopLatency{}
+	// RequestResult is the result metric that rest clients will update.
+	RequestResult ResultMetric = noopResult{}
+)
+
+// Register registers metrics for the rest client to use. This can
+// only be called once.
+func Register(lm LatencyMetric, rm ResultMetric) {
+	registerMetrics.Do(func() {
+		RequestLatency = lm
+		RequestResult = rm
+	})
+}
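+
+// Example (an illustrative sketch; logLatency is a hypothetical LatencyMetric,
+// and logResult would be a ResultMetric implemented the same way):
+//
+//	type logLatency struct{}
+//
+//	func (logLatency) Observe(verb string, u url.URL, latency time.Duration) {
+//		log.Printf("%s %s took %v", verb, u.String(), latency)
+//	}
+//
+//	// once, during program startup:
+//	metrics.Register(logLatency{}, logResult{})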
+
+type noopLatency struct{}
+
+func (noopLatency) Observe(string, url.URL, time.Duration) {}
+
+type noopResult struct{}
+
+func (noopResult) Increment(string, string, string) {}
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/pager/pager.go b/metrics-server/vendor/k8s.io/client-go/tools/pager/pager.go
new file mode 100644
index 0000000..74ea358
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/pager/pager.go
@@ -0,0 +1,117 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pager
+
+import (
+	"context"
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+const defaultPageSize = 500
+
+// ListPageFunc returns a list object for the given list options.
+type ListPageFunc func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error)
+
+// SimplePageFunc adapts a context-less list function into one that accepts a context.
+func SimplePageFunc(fn func(opts metav1.ListOptions) (runtime.Object, error)) ListPageFunc {
+	return func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
+		return fn(opts)
+	}
+}
+
+// ListPager assists client code in breaking large list queries into multiple
+// smaller chunks of PageSize or smaller. PageFn is expected to accept a
+// metav1.ListOptions that supports paging and return a list. The pager does
+// not alter the field or label selectors on the initial options list.
+type ListPager struct {
+	PageSize int64
+	PageFn   ListPageFunc
+
+	FullListIfExpired bool
+}
+
+// New creates a new pager from the provided pager function using the default
+// options. As a last resort, it will fall back to a full list if an expiration
+// error is encountered.
+func New(fn ListPageFunc) *ListPager {
+	return &ListPager{
+		PageSize:          defaultPageSize,
+		PageFn:            fn,
+		FullListIfExpired: true,
+	}
+}
+
+// TODO: introduce other types of paging functions - such as those that retrieve from a list
+// of namespaces.
+
+// List returns a single list object, but attempts to retrieve smaller chunks from the
+// server to reduce the load placed on it. If the chunked attempt fails, it will load
+// the full list instead. The Limit field on options, if unset, will default to the page size.
+func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
+	if options.Limit == 0 {
+		options.Limit = p.PageSize
+	}
+	var list *metainternalversion.List
+	for {
+		obj, err := p.PageFn(ctx, options)
+		if err != nil {
+			if !errors.IsResourceExpired(err) || !p.FullListIfExpired {
+				return nil, err
+			}
+			// the list expired while we were processing, fall back to a full list
+			options.Limit = 0
+			options.Continue = ""
+			return p.PageFn(ctx, options)
+		}
+		m, err := meta.ListAccessor(obj)
+		if err != nil {
+			return nil, fmt.Errorf("returned object must be a list: %v", err)
+		}
+
+		// exit early and return the object we got if we haven't processed any pages
+		if len(m.GetContinue()) == 0 && list == nil {
+			return obj, nil
+		}
+
+		// initialize the list and fill its contents
+		if list == nil {
+			list = &metainternalversion.List{Items: make([]runtime.Object, 0, options.Limit+1)}
+			list.ResourceVersion = m.GetResourceVersion()
+			list.SelfLink = m.GetSelfLink()
+		}
+		if err := meta.EachListItem(obj, func(obj runtime.Object) error {
+			list.Items = append(list.Items, obj)
+			return nil
+		}); err != nil {
+			return nil, err
+		}
+
+		// if we have no more items, return the list
+		if len(m.GetContinue()) == 0 {
+			return list, nil
+		}
+
+		// set the next loop up
+		options.Continue = m.GetContinue()
+	}
+}
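+
+// Example (an illustrative sketch; cs is an assumed kubernetes.Interface
+// clientset, and the list call matches the client-go version vendored here):
+//
+//	p := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
+//		return cs.CoreV1().Pods("").List(opts)
+//	}))
+//	obj, err := p.List(context.Background(), metav1.ListOptions{})
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = obj // either the single page or an assembled metainternalversion.List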
diff --git a/metrics-server/vendor/k8s.io/client-go/tools/reference/ref.go b/metrics-server/vendor/k8s.io/client-go/tools/reference/ref.go
new file mode 100644
index 0000000..573d948
--- /dev/null
+++ b/metrics-server/vendor/k8s.io/client-go/tools/reference/ref.go
@@ -0,0 +1,126 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package reference
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+var (
+	// Errors that could be returned by GetReference.
+	ErrNilObject  = errors.New("can't reference a nil object")
+	ErrNoSelfLink = errors.New("selfLink was empty, can't make reference")
+)
+
+// GetReference returns an ObjectReference which refers to the given
+// object, or an error if the object doesn't follow the conventions
+// that would allow this.
+// TODO: should take a meta.Interface; see http://issue.k8s.io/7127
+func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*v1.ObjectReference, error) {
+	if obj == nil {
+		return nil, ErrNilObject
+	}
+	if ref, ok := obj.(*v1.ObjectReference); ok {
+		// Don't make a reference to a reference.
+		return ref, nil
+	}
+
+	gvk := obj.GetObjectKind().GroupVersionKind()
+
+	// if the object referenced is actually persisted, we can just get kind from meta
+	// if we are building an object reference to something not yet persisted, we should fallback to scheme
+	kind := gvk.Kind
+	if len(kind) == 0 {
+		// TODO: this is wrong
+		gvks, _, err := scheme.ObjectKinds(obj)
+		if err != nil {
+			return nil, err
+		}
+		kind = gvks[0].Kind
+	}
+
+	// An object that implements only List has enough metadata to build a reference
+	var listMeta metav1.Common
+	objectMeta, err := meta.Accessor(obj)
+	if err != nil {
+		listMeta, err = meta.CommonAccessor(obj)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		listMeta = objectMeta
+	}
+
+	// if the object referenced is actually persisted, we can also get version from meta
+	version := gvk.GroupVersion().String()
+	if len(version) == 0 {
+		selfLink := listMeta.GetSelfLink()
+		if len(selfLink) == 0 {
+			return nil, ErrNoSelfLink
+		}
+		selfLinkUrl, err := url.Parse(selfLink)
+		if err != nil {
+			return nil, err
+		}
+		// example paths: /<prefix>/<version>/*
+		parts := strings.Split(selfLinkUrl.Path, "/")
+		if len(parts) < 4 {
+			return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version)
+		}
+		if parts[1] == "api" {
+			version = parts[2]
+		} else {
+			version = parts[2] + "/" + parts[3]
+		}
+	}
+
+	// only has list metadata
+	if objectMeta == nil {
+		return &v1.ObjectReference{
+			Kind:            kind,
+			APIVersion:      version,
+			ResourceVersion: listMeta.GetResourceVersion(),
+		}, nil
+	}
+
+	return &v1.ObjectReference{
+		Kind:            kind,
+		APIVersion:      version,
+		Name:            objectMeta.GetName(),
+		Namespace:       objectMeta.GetNamespace(),
+		UID:             objectMeta.GetUID(),
+		ResourceVersion: objectMeta.GetResourceVersion(),
+	}, nil
+}
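+
+// Example (an illustrative sketch; pod is an assumed *v1.Pod retrieved from the
+// API, and scheme.Scheme is the client-go kubernetes scheme):
+//
+//	ref, err := reference.GetReference(scheme.Scheme, pod)
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Printf("%s %s/%s\n", ref.Kind, ref.Namespace, ref.Name)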
+
+// GetPartialReference is exactly like GetReference, but allows you to set the FieldPath.
+func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*v1.ObjectReference, error) {
+	ref, err := GetReference(scheme, obj)
+	if err != nil {
+		return nil, err
+	}
+	ref.FieldPath = fieldPath
+	return ref, nil
+}