.travis, build: autodelete old unstable archives (#13867)
This commit adds a build step to Travis to auto-delete unstable archives older than 14 days (our regular release schedule) from Azure via ci.go purge. The commit also pulls in the latest Azure storage code, switching over from the old import path (github.com/Azure/azure-sdk-for-go) to the new split-out one (github.com/Azure/azure-storage-go).
parent 3d8de95f99
commit c76ad94492
10  .travis.yml
@@ -178,6 +178,16 @@ matrix:
        - go run build/ci.go xcode -signer IOS_SIGNING_KEY -deploy trunk -upload gethstore/builds

    # This builder does the Azure archive purges to avoid accumulating junk
    - os: linux
      dist: trusty
      sudo: required
      go: 1.8
      env:
        - azure-purge
      script:
        - go run build/ci.go purge -store gethstore/builds -days 14

install:
  - go get golang.org/x/tools/cmd/cover
script:
66  build/ci.go
@@ -32,6 +32,7 @@ Available commands are:
   aar      [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an Android archive
   xcode    [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an iOS XCode framework
   xgo      [ -alltools ] [ options ]                                   -- cross builds according to options
   purge    [ -store blobstore ] [ -days threshold ]                    -- purges old archives from the blobstore

For all commands, -n prevents execution of external programs (dry run mode).
@@ -146,6 +147,8 @@ func main() {
        doXCodeFramework(os.Args[2:])
    case "xgo":
        doXgo(os.Args[2:])
    case "purge":
        doPurge(os.Args[2:])
    default:
        log.Fatal("unknown command ", os.Args[1])
    }
@@ -427,6 +430,10 @@ func maybeSkipArchive(env build.Environment) {
        log.Printf("skipping because this is a PR build")
        os.Exit(0)
    }
    if env.IsCronJob {
        log.Printf("skipping because this is a cron job")
        os.Exit(0)
    }
    if env.Branch != "master" && !strings.HasPrefix(env.Tag, "v1.") {
        log.Printf("skipping because branch %q, tag %q is not on the whitelist", env.Branch, env.Tag)
        os.Exit(0)
@@ -952,3 +959,62 @@ func xgoTool(args []string) *exec.Cmd {
    }
    return cmd
}

// Binary distribution cleanups

func doPurge(cmdline []string) {
    var (
        store = flag.String("store", "", `Destination from where to purge archives (usually "gethstore/builds")`)
        limit = flag.Int("days", 30, `Age threshold above which to delete unstable archives`)
    )
    flag.CommandLine.Parse(cmdline)

    if env := build.Env(); !env.IsCronJob {
        log.Printf("skipping because not a cron job")
        os.Exit(0)
    }
    // Create the Azure authentication and list the current archives
    auth := build.AzureBlobstoreConfig{
        Account:   strings.Split(*store, "/")[0],
        Token:     os.Getenv("AZURE_BLOBSTORE_TOKEN"),
        Container: strings.SplitN(*store, "/", 2)[1],
    }
    blobs, err := build.AzureBlobstoreList(auth)
    if err != nil {
        log.Fatal(err)
    }
    // Iterate over the blobs, collect and sort all unstable builds
    for i := 0; i < len(blobs); i++ {
        if !strings.Contains(blobs[i].Name, "unstable") {
            blobs = append(blobs[:i], blobs[i+1:]...)
            i--
        }
    }
    for i := 0; i < len(blobs); i++ {
        for j := i + 1; j < len(blobs); j++ {
            iTime, err := time.Parse(time.RFC1123, blobs[i].Properties.LastModified)
            if err != nil {
                log.Fatal(err)
            }
            jTime, err := time.Parse(time.RFC1123, blobs[j].Properties.LastModified)
            if err != nil {
                log.Fatal(err)
            }
            if iTime.After(jTime) {
                blobs[i], blobs[j] = blobs[j], blobs[i]
            }
        }
    }
    // Filter out all archives more recent than the given threshold
    for i, blob := range blobs {
        timestamp, _ := time.Parse(time.RFC1123, blob.Properties.LastModified)
        if time.Since(timestamp) < time.Duration(*limit)*24*time.Hour {
            blobs = blobs[:i]
            break
        }
    }
    // Delete the remaining (stale) archives and return
    if err := build.AzureBlobstoreDelete(auth, blobs); err != nil {
        log.Fatal(err)
    }
}
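The selection sort plus cutoff scan above boils down to "keep only the unstable archives older than the -days threshold, oldest first". Below is a minimal standalone sketch of that filtering for readers following along; it is illustrative only and not part of this commit, and blobInfo/olderThan are hypothetical stand-ins for the storage.Blob values doPurge actually works with.

package main

import (
    "fmt"
    "sort"
    "time"
)

// blobInfo mirrors the two fields doPurge relies on: the archive name and
// its RFC1123-formatted Last-Modified timestamp.
type blobInfo struct {
    Name         string
    LastModified string
}

// olderThan returns the blobs whose Last-Modified lies more than the given
// number of days in the past, sorted oldest first. Blobs with unparsable
// timestamps are skipped here (doPurge aborts instead).
func olderThan(blobs []blobInfo, days int) []blobInfo {
    cutoff := time.Now().Add(-time.Duration(days) * 24 * time.Hour)

    type stamped struct {
        blob blobInfo
        ts   time.Time
    }
    var stale []stamped
    for _, b := range blobs {
        ts, err := time.Parse(time.RFC1123, b.LastModified)
        if err != nil {
            continue
        }
        if ts.Before(cutoff) {
            stale = append(stale, stamped{b, ts})
        }
    }
    sort.Slice(stale, func(i, j int) bool { return stale[i].ts.Before(stale[j].ts) })

    out := make([]blobInfo, 0, len(stale))
    for _, s := range stale {
        out = append(out, s.blob)
    }
    return out
}

func main() {
    blobs := []blobInfo{
        {"geth-linux-amd64-unstable-old.tar.gz", "Mon, 02 Jan 2017 15:04:05 GMT"},
        {"geth-linux-amd64-unstable-new.tar.gz", time.Now().UTC().Format(time.RFC1123)},
    }
    // With a 14 day threshold only the old archive is selected for deletion.
    fmt.Println(olderThan(blobs, 14))
}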
@@ -20,7 +20,7 @@ import (
    "fmt"
    "os"

    "github.com/Azure/azure-sdk-for-go/storage"
    storage "github.com/Azure/azure-storage-go"
)

// AzureBlobstoreConfig is an authentication and configuration struct containing
@@ -42,7 +42,6 @@ func AzureBlobstoreUpload(path string, name string, config AzureBlobstoreConfig)
        fmt.Printf("would upload %q to %s/%s/%s\n", path, config.Account, config.Container, name)
        return nil
    }

    // Create an authenticated client against the Azure cloud
    rawClient, err := storage.NewBasicClient(config.Account, config.Token)
    if err != nil {
@@ -63,3 +62,50 @@ func AzureBlobstoreUpload(path string, name string, config AzureBlobstoreConfig)
    }
    return client.CreateBlockBlobFromReader(config.Container, name, uint64(info.Size()), in, nil)
}

// AzureBlobstoreList lists all the files contained within an azure blobstore.
func AzureBlobstoreList(config AzureBlobstoreConfig) ([]storage.Blob, error) {
    // Create an authenticated client against the Azure cloud
    rawClient, err := storage.NewBasicClient(config.Account, config.Token)
    if err != nil {
        return nil, err
    }
    client := rawClient.GetBlobService()

    // List all the blobs from the container and return them
    container := client.GetContainerReference(config.Container)

    blobs, err := container.ListBlobs(storage.ListBlobsParameters{
        MaxResults: 1024 * 1024 * 1024, // Yes, fetch all of them
        Timeout:    3600,               // Yes, wait for all of them
    })
    if err != nil {
        return nil, err
    }
    return blobs.Blobs, nil
}

// AzureBlobstoreDelete iterates over a list of files to delete and removes them
// from the blobstore.
func AzureBlobstoreDelete(config AzureBlobstoreConfig, blobs []storage.Blob) error {
    if *DryRunFlag {
        for _, blob := range blobs {
            fmt.Printf("would delete %s (%s) from %s/%s\n", blob.Name, blob.Properties.LastModified, config.Account, config.Container)
        }
        return nil
    }
    // Create an authenticated client against the Azure cloud
    rawClient, err := storage.NewBasicClient(config.Account, config.Token)
    if err != nil {
        return err
    }
    client := rawClient.GetBlobService()

    // Iterate over the blobs and delete them
    for _, blob := range blobs {
        if err := client.DeleteBlob(config.Container, blob.Name, nil); err != nil {
            return err
        }
    }
    return nil
}
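AzureBlobstoreList deliberately sidesteps pagination by requesting up to 1024^3 results in a single call. If that ever becomes impractical, the same SDK exposes marker-based paging through the ListBlobsParameters and BlobListResponse types that appear further down in this diff. The sketch below is illustrative only and not part of this commit; the AZURE_ACCOUNT environment variable, the page size of 500 and the "builds" container name are placeholder assumptions.

package main

import (
    "fmt"
    "log"
    "os"

    storage "github.com/Azure/azure-storage-go"
)

func main() {
    // Credentials mirror AzureBlobstoreConfig: account name plus access token.
    rawClient, err := storage.NewBasicClient(os.Getenv("AZURE_ACCOUNT"), os.Getenv("AZURE_BLOBSTORE_TOKEN"))
    if err != nil {
        log.Fatal(err)
    }
    client := rawClient.GetBlobService()
    container := client.GetContainerReference("builds")

    // Page through the container instead of requesting everything at once.
    var all []storage.Blob
    params := storage.ListBlobsParameters{MaxResults: 500}
    for {
        res, err := container.ListBlobs(params)
        if err != nil {
            log.Fatal(err)
        }
        all = append(all, res.Blobs...)
        if res.NextMarker == "" {
            break // no more pages
        }
        params.Marker = res.NextMarker
    }
    fmt.Println("total blobs:", len(all))
}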
@@ -30,6 +30,7 @@ var (
    GitTagFlag      = flag.String("git-tag", "", `Overrides git tag being built`)
    BuildnumFlag    = flag.String("buildnum", "", `Overrides CI build number`)
    PullRequestFlag = flag.Bool("pull-request", false, `Overrides pull request status of the build`)
    CronJobFlag     = flag.Bool("cron-job", false, `Overrides cron job status of the build`)
)

// Environment contains metadata provided by the build environment.
@@ -39,6 +40,7 @@ type Environment struct {
    Commit, Branch, Tag string // Git info
    Buildnum            string
    IsPullRequest       bool
    IsCronJob           bool
}

func (env Environment) String() string {
@@ -59,6 +61,7 @@ func Env() Environment {
            Tag:           os.Getenv("TRAVIS_TAG"),
            Buildnum:      os.Getenv("TRAVIS_BUILD_NUMBER"),
            IsPullRequest: os.Getenv("TRAVIS_PULL_REQUEST") != "false",
            IsCronJob:     os.Getenv("TRAVIS_EVENT_TYPE") == "cron",
        }
    case os.Getenv("CI") == "True" && os.Getenv("APPVEYOR") == "True":
        return Environment{
@@ -69,6 +72,7 @@ func Env() Environment {
            Tag:           os.Getenv("APPVEYOR_REPO_TAG_NAME"),
            Buildnum:      os.Getenv("APPVEYOR_BUILD_NUMBER"),
            IsPullRequest: os.Getenv("APPVEYOR_PULL_REQUEST_NUMBER") != "",
            IsCronJob:     os.Getenv("APPVEYOR_SCHEDULED_BUILD") == "True",
        }
    default:
        return LocalEnv()
@@ -118,5 +122,8 @@ func applyEnvFlags(env Environment) Environment {
    if *PullRequestFlag {
        env.IsPullRequest = true
    }
    if *CronJobFlag {
        env.IsCronJob = true
    }
    return env
}
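The new IsCronJob field (plus the -cron-job override) is what restricts the purge builder to scheduled runs. Below is a self-contained sketch of that detection outside the build package; it is illustrative only and mirrors the TRAVIS_EVENT_TYPE and APPVEYOR_SCHEDULED_BUILD checks in the hunks above, while the TRAVIS=true guard is assumed from Travis's documented environment (the Travis branch of Env() is not shown in this hunk).

package main

import (
    "fmt"
    "os"
)

// isCronJob reports whether the current CI run was triggered by a scheduler:
// Travis sets TRAVIS_EVENT_TYPE=cron, AppVeyor sets APPVEYOR_SCHEDULED_BUILD=True.
func isCronJob() bool {
    switch {
    case os.Getenv("TRAVIS") == "true":
        return os.Getenv("TRAVIS_EVENT_TYPE") == "cron"
    case os.Getenv("APPVEYOR") == "True":
        return os.Getenv("APPVEYOR_SCHEDULED_BUILD") == "True"
    default:
        return false
    }
}

func main() {
    if !isCronJob() {
        fmt.Println("skipping because not a cron job")
        return
    }
    fmt.Println("cron build: running the Azure purge step")
}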
@@ -1,5 +0,0 @@
# Azure Storage SDK for Go

The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform operations in Azure Storage Service. To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](../arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](../management/storageservice) package.

This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/)
@ -1,352 +0,0 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// FileServiceClient contains operations for Microsoft Azure File Service.
|
||||
type FileServiceClient struct {
|
||||
client Client
|
||||
}
|
||||
|
||||
// A Share is an entry in ShareListResponse.
|
||||
type Share struct {
|
||||
Name string `xml:"Name"`
|
||||
Properties ShareProperties `xml:"Properties"`
|
||||
}
|
||||
|
||||
// ShareProperties contains various properties of a share returned from
|
||||
// various endpoints like ListShares.
|
||||
type ShareProperties struct {
|
||||
LastModified string `xml:"Last-Modified"`
|
||||
Etag string `xml:"Etag"`
|
||||
Quota string `xml:"Quota"`
|
||||
}
|
||||
|
||||
// ShareListResponse contains the response fields from
|
||||
// ListShares call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx
|
||||
type ShareListResponse struct {
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
Xmlns string `xml:"xmlns,attr"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Marker string `xml:"Marker"`
|
||||
NextMarker string `xml:"NextMarker"`
|
||||
MaxResults int64 `xml:"MaxResults"`
|
||||
Shares []Share `xml:"Shares>Share"`
|
||||
}
|
||||
|
||||
// ListSharesParameters defines the set of customizable parameters to make a
|
||||
// List Shares call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx
|
||||
type ListSharesParameters struct {
|
||||
Prefix string
|
||||
Marker string
|
||||
Include string
|
||||
MaxResults uint
|
||||
Timeout uint
|
||||
}
|
||||
|
||||
// ShareHeaders contains various properties of a file and is an entry
|
||||
// in SetShareProperties
|
||||
type ShareHeaders struct {
|
||||
Quota string `header:"x-ms-share-quota"`
|
||||
}
|
||||
|
||||
func (p ListSharesParameters) getParameters() url.Values {
|
||||
out := url.Values{}
|
||||
|
||||
if p.Prefix != "" {
|
||||
out.Set("prefix", p.Prefix)
|
||||
}
|
||||
if p.Marker != "" {
|
||||
out.Set("marker", p.Marker)
|
||||
}
|
||||
if p.Include != "" {
|
||||
out.Set("include", p.Include)
|
||||
}
|
||||
if p.MaxResults != 0 {
|
||||
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
|
||||
}
|
||||
if p.Timeout != 0 {
|
||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// pathForFileShare returns the URL path segment for a File Share resource
|
||||
func pathForFileShare(name string) string {
|
||||
return fmt.Sprintf("/%s", name)
|
||||
}
|
||||
|
||||
// ListShares returns the list of shares in a storage account along with
|
||||
// pagination token and other response details.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
|
||||
func (f FileServiceClient) ListShares(params ListSharesParameters) (ShareListResponse, error) {
|
||||
q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
|
||||
uri := f.client.getEndpoint(fileServiceName, "", q)
|
||||
headers := f.client.getStandardHeaders()
|
||||
|
||||
var out ShareListResponse
|
||||
resp, err := f.client.exec("GET", uri, headers, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
// CreateShare operation creates a new share under the specified account. If the
|
||||
// share with the same name already exists, the operation fails.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx
|
||||
func (f FileServiceClient) CreateShare(name string) error {
|
||||
resp, err := f.createShare(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
// ShareExists returns true if a share with given name exists
|
||||
// on the storage account, otherwise returns false.
|
||||
func (f FileServiceClient) ShareExists(name string) (bool, error) {
|
||||
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}})
|
||||
headers := f.client.getStandardHeaders()
|
||||
|
||||
resp, err := f.client.exec("HEAD", uri, headers, nil)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusOK, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// GetShareURL gets the canonical URL to the share with the specified name in the
|
||||
// specified container. This method does not create a publicly accessible URL if
|
||||
// the file is private and this method does not check if the file
|
||||
// exists.
|
||||
func (f FileServiceClient) GetShareURL(name string) string {
|
||||
return f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{})
|
||||
}
|
||||
|
||||
// CreateShareIfNotExists creates a new share under the specified account if
|
||||
// it does not exist. Returns true if container is newly created or false if
|
||||
// container already exists.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx
|
||||
func (f FileServiceClient) CreateShareIfNotExists(name string) (bool, error) {
|
||||
resp, err := f.createShare(name)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
||||
return resp.statusCode == http.StatusCreated, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// CreateShare creates a Azure File Share and returns its response
|
||||
func (f FileServiceClient) createShare(name string) (*storageResponse, error) {
|
||||
if err := f.checkForStorageEmulator(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}})
|
||||
headers := f.client.getStandardHeaders()
|
||||
return f.client.exec("PUT", uri, headers, nil)
|
||||
}
|
||||
|
||||
// GetShareProperties provides various information about the specified
|
||||
// file. See https://msdn.microsoft.com/en-us/library/azure/dn689099.aspx
|
||||
func (f FileServiceClient) GetShareProperties(name string) (*ShareProperties, error) {
|
||||
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}})
|
||||
|
||||
headers := f.client.getStandardHeaders()
|
||||
resp, err := f.client.exec("HEAD", uri, headers, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ShareProperties{
|
||||
LastModified: resp.headers.Get("Last-Modified"),
|
||||
Etag: resp.headers.Get("Etag"),
|
||||
Quota: resp.headers.Get("x-ms-share-quota"),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SetShareProperties replaces the ShareHeaders for the specified file.
|
||||
//
|
||||
// Some keys may be converted to Camel-Case before sending. All keys
|
||||
// are returned in lower case by SetShareProperties. HTTP header names
|
||||
// are case-insensitive so case munging should not matter to other
|
||||
// applications either.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/mt427368.aspx
|
||||
func (f FileServiceClient) SetShareProperties(name string, shareHeaders ShareHeaders) error {
|
||||
params := url.Values{}
|
||||
params.Set("restype", "share")
|
||||
params.Set("comp", "properties")
|
||||
|
||||
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), params)
|
||||
headers := f.client.getStandardHeaders()
|
||||
|
||||
extraHeaders := headersFromStruct(shareHeaders)
|
||||
|
||||
for k, v := range extraHeaders {
|
||||
headers[k] = v
|
||||
}
|
||||
|
||||
resp, err := f.client.exec("PUT", uri, headers, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
||||
// DeleteShare operation marks the specified share for deletion. The share
|
||||
// and any files contained within it are later deleted during garbage
|
||||
// collection.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx
|
||||
func (f FileServiceClient) DeleteShare(name string) error {
|
||||
resp, err := f.deleteShare(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
||||
}
|
||||
|
||||
// DeleteShareIfExists operation marks the specified share for deletion if it
|
||||
// exists. The share and any files contained within it are later deleted during
|
||||
// garbage collection. Returns true if share existed and deleted with this call,
|
||||
// false otherwise.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx
|
||||
func (f FileServiceClient) DeleteShareIfExists(name string) (bool, error) {
|
||||
resp, err := f.deleteShare(name)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusAccepted, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// deleteShare makes the call to Delete Share operation endpoint and returns
|
||||
// the response
|
||||
func (f FileServiceClient) deleteShare(name string) (*storageResponse, error) {
|
||||
if err := f.checkForStorageEmulator(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}})
|
||||
return f.client.exec("DELETE", uri, f.client.getStandardHeaders(), nil)
|
||||
}
|
||||
|
||||
// SetShareMetadata replaces the metadata for the specified Share.
|
||||
//
|
||||
// Some keys may be converted to Camel-Case before sending. All keys
|
||||
// are returned in lower case by GetShareMetadata. HTTP header names
|
||||
// are case-insensitive so case munging should not matter to other
|
||||
// applications either.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
|
||||
func (f FileServiceClient) SetShareMetadata(name string, metadata map[string]string, extraHeaders map[string]string) error {
|
||||
params := url.Values{}
|
||||
params.Set("restype", "share")
|
||||
params.Set("comp", "metadata")
|
||||
|
||||
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), params)
|
||||
headers := f.client.getStandardHeaders()
|
||||
for k, v := range metadata {
|
||||
headers[userDefinedMetadataHeaderPrefix+k] = v
|
||||
}
|
||||
|
||||
for k, v := range extraHeaders {
|
||||
headers[k] = v
|
||||
}
|
||||
|
||||
resp, err := f.client.exec("PUT", uri, headers, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
||||
// GetShareMetadata returns all user-defined metadata for the specified share.
|
||||
//
|
||||
// All metadata keys will be returned in lower case. (HTTP header
|
||||
// names are case-insensitive.)
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
|
||||
func (f FileServiceClient) GetShareMetadata(name string) (map[string]string, error) {
|
||||
params := url.Values{}
|
||||
params.Set("restype", "share")
|
||||
params.Set("comp", "metadata")
|
||||
|
||||
uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), params)
|
||||
headers := f.client.getStandardHeaders()
|
||||
|
||||
resp, err := f.client.exec("GET", uri, headers, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
metadata := make(map[string]string)
|
||||
for k, v := range resp.headers {
|
||||
// Can't trust CanonicalHeaderKey() to munge case
|
||||
// reliably. "_" is allowed in identifiers:
|
||||
// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
|
||||
// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
|
||||
// http://tools.ietf.org/html/rfc7230#section-3.2
|
||||
// ...but "_" is considered invalid by
|
||||
// CanonicalMIMEHeaderKey in
|
||||
// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
|
||||
// so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar".
|
||||
k = strings.ToLower(k)
|
||||
if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
|
||||
continue
|
||||
}
|
||||
// metadata["foo"] = content of the last X-Ms-Meta-Foo header
|
||||
k = k[len(userDefinedMetadataHeaderPrefix):]
|
||||
metadata[k] = v[len(v)-1]
|
||||
}
|
||||
return metadata, nil
|
||||
}
|
||||
|
||||
//checkForStorageEmulator determines if the client is setup for use with
|
||||
//Azure Storage Emulator, and returns a relevant error
|
||||
func (f FileServiceClient) checkForStorageEmulator() error {
|
||||
if f.client.accountName == StorageEmulatorAccountName {
|
||||
return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator")
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,129 +0,0 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// TableServiceClient contains operations for Microsoft Azure Table Storage
|
||||
// Service.
|
||||
type TableServiceClient struct {
|
||||
client Client
|
||||
}
|
||||
|
||||
// AzureTable is the typedef of the Azure Table name
|
||||
type AzureTable string
|
||||
|
||||
const (
|
||||
tablesURIPath = "/Tables"
|
||||
)
|
||||
|
||||
type createTableRequest struct {
|
||||
TableName string `json:"TableName"`
|
||||
}
|
||||
|
||||
func pathForTable(table AzureTable) string { return fmt.Sprintf("%s", table) }
|
||||
|
||||
func (c *TableServiceClient) getStandardHeaders() map[string]string {
|
||||
return map[string]string{
|
||||
"x-ms-version": "2015-02-21",
|
||||
"x-ms-date": currentTimeRfc1123Formatted(),
|
||||
"Accept": "application/json;odata=nometadata",
|
||||
"Accept-Charset": "UTF-8",
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
}
|
||||
|
||||
// QueryTables returns the tables created in the
|
||||
// *TableServiceClient storage account.
|
||||
func (c *TableServiceClient) QueryTables() ([]AzureTable, error) {
|
||||
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
|
||||
|
||||
headers := c.getStandardHeaders()
|
||||
headers["Content-Length"] = "0"
|
||||
|
||||
resp, err := c.client.execTable("GET", uri, headers, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(resp.body)
|
||||
|
||||
var respArray queryTablesResponse
|
||||
if err := json.Unmarshal(buf.Bytes(), &respArray); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := make([]AzureTable, len(respArray.TableName))
|
||||
for i, elem := range respArray.TableName {
|
||||
s[i] = AzureTable(elem.TableName)
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// CreateTable creates the table given the specific
|
||||
// name. This function fails if the name is not compliant
|
||||
// with the specification or the tables already exists.
|
||||
func (c *TableServiceClient) CreateTable(table AzureTable) error {
|
||||
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
|
||||
|
||||
headers := c.getStandardHeaders()
|
||||
|
||||
req := createTableRequest{TableName: string(table)}
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
if err := json.NewEncoder(buf).Encode(req); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
headers["Content-Length"] = fmt.Sprintf("%d", buf.Len())
|
||||
|
||||
resp, err := c.client.execTable("POST", uri, headers, buf)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteTable deletes the table given the specific
|
||||
// name. This function fails if the table is not present.
|
||||
// Be advised: DeleteTable deletes all the entries
|
||||
// that may be present.
|
||||
func (c *TableServiceClient) DeleteTable(table AzureTable) error {
|
||||
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
|
||||
uri += fmt.Sprintf("('%s')", string(table))
|
||||
|
||||
headers := c.getStandardHeaders()
|
||||
|
||||
headers["Content-Length"] = "0"
|
||||
|
||||
resp, err := c.client.execTable("DELETE", uri, headers, nil)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
||||
return err
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
|
@@ -0,0 +1,21 @@
MIT License

Copyright (c) Microsoft Corporation. All rights reserved.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE
@@ -0,0 +1,10 @@
# Azure Storage SDK for Go
[![GoDoc](https://godoc.org/github.com/Azure/azure-storage-go?status.svg)](https://godoc.org/github.com/Azure/azure-storage-go) [![Build Status](https://travis-ci.org/Azure/azure-storage-go.svg?branch=master)](https://travis-ci.org/Azure/azure-storage-go) [![Go Report Card](https://goreportcard.com/badge/github.com/Azure/azure-storage-go)](https://goreportcard.com/report/github.com/Azure/azure-storage-go)

The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform operations in Azure Storage Service. To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](../arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](../management/storageservice) package.

This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/)

# Contributing

This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
@ -0,0 +1,223 @@
|
|||
// Package storage provides clients for Microsoft Azure Storage Services.
|
||||
package storage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/authentication-for-the-azure-storage-services
|
||||
|
||||
type authentication string
|
||||
|
||||
const (
|
||||
sharedKey authentication = "sharedKey"
|
||||
sharedKeyForTable authentication = "sharedKeyTable"
|
||||
sharedKeyLite authentication = "sharedKeyLite"
|
||||
sharedKeyLiteForTable authentication = "sharedKeyLiteTable"
|
||||
|
||||
// headers
|
||||
headerAuthorization = "Authorization"
|
||||
headerContentLength = "Content-Length"
|
||||
headerDate = "Date"
|
||||
headerXmsDate = "x-ms-date"
|
||||
headerXmsVersion = "x-ms-version"
|
||||
headerContentEncoding = "Content-Encoding"
|
||||
headerContentLanguage = "Content-Language"
|
||||
headerContentType = "Content-Type"
|
||||
headerContentMD5 = "Content-MD5"
|
||||
headerIfModifiedSince = "If-Modified-Since"
|
||||
headerIfMatch = "If-Match"
|
||||
headerIfNoneMatch = "If-None-Match"
|
||||
headerIfUnmodifiedSince = "If-Unmodified-Since"
|
||||
headerRange = "Range"
|
||||
)
|
||||
|
||||
func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) {
|
||||
authHeader, err := c.getSharedKey(verb, url, headers, auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
headers[headerAuthorization] = authHeader
|
||||
return headers, nil
|
||||
}
|
||||
|
||||
func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) {
|
||||
canRes, err := c.buildCanonicalizedResource(url, auth)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
canString, err := buildCanonicalizedString(verb, headers, canRes, auth)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return c.createAuthorizationHeader(canString, auth), nil
|
||||
}
|
||||
|
||||
func (c *Client) buildCanonicalizedResource(uri string, auth authentication) (string, error) {
|
||||
errMsg := "buildCanonicalizedResource error: %s"
|
||||
u, err := url.Parse(uri)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf(errMsg, err.Error())
|
||||
}
|
||||
|
||||
cr := bytes.NewBufferString("/")
|
||||
cr.WriteString(c.getCanonicalizedAccountName())
|
||||
|
||||
if len(u.Path) > 0 {
|
||||
// Any portion of the CanonicalizedResource string that is derived from
|
||||
// the resource's URI should be encoded exactly as it is in the URI.
|
||||
// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
|
||||
cr.WriteString(u.EscapedPath())
|
||||
}
|
||||
|
||||
params, err := url.ParseQuery(u.RawQuery)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf(errMsg, err.Error())
|
||||
}
|
||||
|
||||
// See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277
|
||||
if auth == sharedKey {
|
||||
if len(params) > 0 {
|
||||
cr.WriteString("\n")
|
||||
|
||||
keys := []string{}
|
||||
for key := range params {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
completeParams := []string{}
|
||||
for _, key := range keys {
|
||||
if len(params[key]) > 1 {
|
||||
sort.Strings(params[key])
|
||||
}
|
||||
|
||||
completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")))
|
||||
}
|
||||
cr.WriteString(strings.Join(completeParams, "\n"))
|
||||
}
|
||||
} else {
|
||||
// search for "comp" parameter, if exists then add it to canonicalizedresource
|
||||
if v, ok := params["comp"]; ok {
|
||||
cr.WriteString("?comp=" + v[0])
|
||||
}
|
||||
}
|
||||
|
||||
return string(cr.Bytes()), nil
|
||||
}
|
||||
|
||||
func (c *Client) getCanonicalizedAccountName() string {
|
||||
// since we may be trying to access a secondary storage account, we need to
|
||||
// remove the -secondary part of the storage name
|
||||
return strings.TrimSuffix(c.accountName, "-secondary")
|
||||
}
|
||||
|
||||
func buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string, auth authentication) (string, error) {
|
||||
contentLength := headers[headerContentLength]
|
||||
if contentLength == "0" {
|
||||
contentLength = ""
|
||||
}
|
||||
date := headers[headerDate]
|
||||
if v, ok := headers[headerXmsDate]; ok {
|
||||
if auth == sharedKey || auth == sharedKeyLite {
|
||||
date = ""
|
||||
} else {
|
||||
date = v
|
||||
}
|
||||
}
|
||||
var canString string
|
||||
switch auth {
|
||||
case sharedKey:
|
||||
canString = strings.Join([]string{
|
||||
verb,
|
||||
headers[headerContentEncoding],
|
||||
headers[headerContentLanguage],
|
||||
contentLength,
|
||||
headers[headerContentMD5],
|
||||
headers[headerContentType],
|
||||
date,
|
||||
headers[headerIfModifiedSince],
|
||||
headers[headerIfMatch],
|
||||
headers[headerIfNoneMatch],
|
||||
headers[headerIfUnmodifiedSince],
|
||||
headers[headerRange],
|
||||
buildCanonicalizedHeader(headers),
|
||||
canonicalizedResource,
|
||||
}, "\n")
|
||||
case sharedKeyForTable:
|
||||
canString = strings.Join([]string{
|
||||
verb,
|
||||
headers[headerContentMD5],
|
||||
headers[headerContentType],
|
||||
date,
|
||||
canonicalizedResource,
|
||||
}, "\n")
|
||||
case sharedKeyLite:
|
||||
canString = strings.Join([]string{
|
||||
verb,
|
||||
headers[headerContentMD5],
|
||||
headers[headerContentType],
|
||||
date,
|
||||
buildCanonicalizedHeader(headers),
|
||||
canonicalizedResource,
|
||||
}, "\n")
|
||||
case sharedKeyLiteForTable:
|
||||
canString = strings.Join([]string{
|
||||
date,
|
||||
canonicalizedResource,
|
||||
}, "\n")
|
||||
default:
|
||||
return "", fmt.Errorf("%s authentication is not supported yet", auth)
|
||||
}
|
||||
return canString, nil
|
||||
}
|
||||
|
||||
func buildCanonicalizedHeader(headers map[string]string) string {
|
||||
cm := make(map[string]string)
|
||||
|
||||
for k, v := range headers {
|
||||
headerName := strings.TrimSpace(strings.ToLower(k))
|
||||
if strings.HasPrefix(headerName, "x-ms-") {
|
||||
cm[headerName] = v
|
||||
}
|
||||
}
|
||||
|
||||
if len(cm) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
keys := []string{}
|
||||
for key := range cm {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
|
||||
sort.Strings(keys)
|
||||
|
||||
ch := bytes.NewBufferString("")
|
||||
|
||||
for _, key := range keys {
|
||||
ch.WriteString(key)
|
||||
ch.WriteRune(':')
|
||||
ch.WriteString(cm[key])
|
||||
ch.WriteRune('\n')
|
||||
}
|
||||
|
||||
return strings.TrimSuffix(string(ch.Bytes()), "\n")
|
||||
}
|
||||
|
||||
func (c *Client) createAuthorizationHeader(canonicalizedString string, auth authentication) string {
|
||||
signature := c.computeHmac256(canonicalizedString)
|
||||
var key string
|
||||
switch auth {
|
||||
case sharedKey, sharedKeyForTable:
|
||||
key = "SharedKey"
|
||||
case sharedKeyLite, sharedKeyLiteForTable:
|
||||
key = "SharedKeyLite"
|
||||
}
|
||||
return fmt.Sprintf("%s %s:%s", key, c.getCanonicalizedAccountName(), signature)
|
||||
}
|
|
@ -13,44 +13,6 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
// BlobStorageClient contains operations for Microsoft Azure Blob Storage
|
||||
// Service.
|
||||
type BlobStorageClient struct {
|
||||
client Client
|
||||
}
|
||||
|
||||
// A Container is an entry in ContainerListResponse.
|
||||
type Container struct {
|
||||
Name string `xml:"Name"`
|
||||
Properties ContainerProperties `xml:"Properties"`
|
||||
// TODO (ahmetalpbalkan) Metadata
|
||||
}
|
||||
|
||||
// ContainerProperties contains various properties of a container returned from
|
||||
// various endpoints like ListContainers.
|
||||
type ContainerProperties struct {
|
||||
LastModified string `xml:"Last-Modified"`
|
||||
Etag string `xml:"Etag"`
|
||||
LeaseStatus string `xml:"LeaseStatus"`
|
||||
LeaseState string `xml:"LeaseState"`
|
||||
LeaseDuration string `xml:"LeaseDuration"`
|
||||
// TODO (ahmetalpbalkan) remaining fields
|
||||
}
|
||||
|
||||
// ContainerListResponse contains the response fields from
|
||||
// ListContainers call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
|
||||
type ContainerListResponse struct {
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
Xmlns string `xml:"xmlns,attr"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Marker string `xml:"Marker"`
|
||||
NextMarker string `xml:"NextMarker"`
|
||||
MaxResults int64 `xml:"MaxResults"`
|
||||
Containers []Container `xml:"Containers>Container"`
|
||||
}
|
||||
|
||||
// A Blob is an entry in BlobListResponse.
|
||||
type Blob struct {
|
||||
Name string `xml:"Name"`
|
||||
|
@ -122,6 +84,7 @@ type BlobProperties struct {
|
|||
CopyCompletionTime string `xml:"CopyCompletionTime"`
|
||||
CopyStatusDescription string `xml:"CopyStatusDescription"`
|
||||
LeaseStatus string `xml:"LeaseStatus"`
|
||||
LeaseState string `xml:"LeaseState"`
|
||||
}
|
||||
|
||||
// BlobHeaders contains various properties of a blob and is an entry
|
||||
|
@ -134,101 +97,6 @@ type BlobHeaders struct {
|
|||
CacheControl string `header:"x-ms-blob-cache-control"`
|
||||
}
|
||||
|
||||
// BlobListResponse contains the response fields from ListBlobs call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
|
||||
type BlobListResponse struct {
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
Xmlns string `xml:"xmlns,attr"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Marker string `xml:"Marker"`
|
||||
NextMarker string `xml:"NextMarker"`
|
||||
MaxResults int64 `xml:"MaxResults"`
|
||||
Blobs []Blob `xml:"Blobs>Blob"`
|
||||
|
||||
// BlobPrefix is used to traverse blobs as if it were a file system.
|
||||
// It is returned if ListBlobsParameters.Delimiter is specified.
|
||||
// The list here can be thought of as "folders" that may contain
|
||||
// other folders or blobs.
|
||||
BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"`
|
||||
|
||||
// Delimiter is used to traverse blobs as if it were a file system.
|
||||
// It is returned if ListBlobsParameters.Delimiter is specified.
|
||||
Delimiter string `xml:"Delimiter"`
|
||||
}
|
||||
|
||||
// ListContainersParameters defines the set of customizable parameters to make a
|
||||
// List Containers call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
|
||||
type ListContainersParameters struct {
|
||||
Prefix string
|
||||
Marker string
|
||||
Include string
|
||||
MaxResults uint
|
||||
Timeout uint
|
||||
}
|
||||
|
||||
func (p ListContainersParameters) getParameters() url.Values {
|
||||
out := url.Values{}
|
||||
|
||||
if p.Prefix != "" {
|
||||
out.Set("prefix", p.Prefix)
|
||||
}
|
||||
if p.Marker != "" {
|
||||
out.Set("marker", p.Marker)
|
||||
}
|
||||
if p.Include != "" {
|
||||
out.Set("include", p.Include)
|
||||
}
|
||||
if p.MaxResults != 0 {
|
||||
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
|
||||
}
|
||||
if p.Timeout != 0 {
|
||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// ListBlobsParameters defines the set of customizable
|
||||
// parameters to make a List Blobs call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
|
||||
type ListBlobsParameters struct {
|
||||
Prefix string
|
||||
Delimiter string
|
||||
Marker string
|
||||
Include string
|
||||
MaxResults uint
|
||||
Timeout uint
|
||||
}
|
||||
|
||||
func (p ListBlobsParameters) getParameters() url.Values {
|
||||
out := url.Values{}
|
||||
|
||||
if p.Prefix != "" {
|
||||
out.Set("prefix", p.Prefix)
|
||||
}
|
||||
if p.Delimiter != "" {
|
||||
out.Set("delimiter", p.Delimiter)
|
||||
}
|
||||
if p.Marker != "" {
|
||||
out.Set("marker", p.Marker)
|
||||
}
|
||||
if p.Include != "" {
|
||||
out.Set("include", p.Include)
|
||||
}
|
||||
if p.MaxResults != 0 {
|
||||
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
|
||||
}
|
||||
if p.Timeout != 0 {
|
||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// BlobType defines the type of the Azure Blob.
|
||||
type BlobType string
|
||||
|
||||
|
@ -259,7 +127,7 @@ const (
|
|||
// lease constants.
|
||||
const (
|
||||
leaseHeaderPrefix = "x-ms-lease-"
|
||||
leaseID = "x-ms-lease-id"
|
||||
headerLeaseID = "x-ms-lease-id"
|
||||
leaseAction = "x-ms-lease-action"
|
||||
leaseBreakPeriod = "x-ms-lease-break-period"
|
||||
leaseDuration = "x-ms-lease-duration"
|
||||
|
@ -287,23 +155,9 @@ const (
|
|||
BlockListTypeUncommitted BlockListType = "uncommitted"
|
||||
)
|
||||
|
||||
// ContainerAccessType defines the access level to the container from a public
|
||||
// request.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms-
|
||||
// blob-public-access" header.
|
||||
type ContainerAccessType string
|
||||
|
||||
// Access options for containers
|
||||
const (
|
||||
ContainerAccessTypePrivate ContainerAccessType = ""
|
||||
ContainerAccessTypeBlob ContainerAccessType = "blob"
|
||||
ContainerAccessTypeContainer ContainerAccessType = "container"
|
||||
)
|
||||
|
||||
// Maximum sizes (per REST API) for various concepts
|
||||
const (
|
||||
MaxBlobBlockSize = 4 * 1024 * 1024
|
||||
MaxBlobBlockSize = 100 * 1024 * 1024
|
||||
MaxBlobPageSize = 4 * 1024 * 1024
|
||||
)
|
||||
|
||||
|
@ -341,7 +195,7 @@ type BlockResponse struct {
|
|||
Size int64 `xml:"Size"`
|
||||
}
|
||||
|
||||
// GetPageRangesResponse contains the reponse fields from
|
||||
// GetPageRangesResponse contains the response fields from
|
||||
// Get Page Ranges call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
|
||||
|
@ -364,149 +218,14 @@ var (
|
|||
errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch")
|
||||
)
|
||||
|
||||
// ListContainers returns the list of containers in a storage account along with
|
||||
// pagination token and other response details.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
|
||||
func (b BlobStorageClient) ListContainers(params ListContainersParameters) (ContainerListResponse, error) {
|
||||
q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
|
||||
uri := b.client.getEndpoint(blobServiceName, "", q)
|
||||
headers := b.client.getStandardHeaders()
|
||||
|
||||
var out ContainerListResponse
|
||||
resp, err := b.client.exec("GET", uri, headers, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
// CreateContainer creates a blob container within the storage account
|
||||
// with given name and access level. Returns error if container already exists.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
|
||||
func (b BlobStorageClient) CreateContainer(name string, access ContainerAccessType) error {
|
||||
resp, err := b.createContainer(name, access)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
// CreateContainerIfNotExists creates a blob container if it does not exist. Returns
|
||||
// true if container is newly created or false if container already exists.
|
||||
func (b BlobStorageClient) CreateContainerIfNotExists(name string, access ContainerAccessType) (bool, error) {
|
||||
resp, err := b.createContainer(name, access)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
||||
return resp.statusCode == http.StatusCreated, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
func (b BlobStorageClient) createContainer(name string, access ContainerAccessType) (*storageResponse, error) {
|
||||
verb := "PUT"
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}})
|
||||
|
||||
headers := b.client.getStandardHeaders()
|
||||
if access != "" {
|
||||
headers["x-ms-blob-public-access"] = string(access)
|
||||
}
|
||||
return b.client.exec(verb, uri, headers, nil)
|
||||
}
|
||||
|
||||
// ContainerExists returns true if a container with given name exists
|
||||
// on the storage account, otherwise returns false.
|
||||
func (b BlobStorageClient) ContainerExists(name string) (bool, error) {
|
||||
verb := "HEAD"
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}})
|
||||
headers := b.client.getStandardHeaders()
|
||||
|
||||
resp, err := b.client.exec(verb, uri, headers, nil)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusOK, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// DeleteContainer deletes the container with given name on the storage
|
||||
// account. If the container does not exist returns error.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
|
||||
func (b BlobStorageClient) DeleteContainer(name string) error {
|
||||
resp, err := b.deleteContainer(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
||||
}
|
||||
|
||||
// DeleteContainerIfExists deletes the container with given name on the storage
|
||||
// account if it exists. Returns true if container is deleted with this call, or
|
||||
// false if the container did not exist at the time of the Delete Container
|
||||
// operation.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
|
||||
func (b BlobStorageClient) DeleteContainerIfExists(name string) (bool, error) {
|
||||
resp, err := b.deleteContainer(name)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusAccepted, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
func (b BlobStorageClient) deleteContainer(name string) (*storageResponse, error) {
|
||||
verb := "DELETE"
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}})
|
||||
|
||||
headers := b.client.getStandardHeaders()
|
||||
return b.client.exec(verb, uri, headers, nil)
|
||||
}
|
||||
|
||||
// ListBlobs returns an object that contains list of blobs in the container,
|
||||
// pagination token and other information in the response of List Blobs call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
|
||||
func (b BlobStorageClient) ListBlobs(container string, params ListBlobsParameters) (BlobListResponse, error) {
|
||||
q := mergeParams(params.getParameters(), url.Values{
|
||||
"restype": {"container"},
|
||||
"comp": {"list"}})
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), q)
|
||||
headers := b.client.getStandardHeaders()
|
||||
|
||||
var out BlobListResponse
|
||||
resp, err := b.client.exec("GET", uri, headers, nil)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
// BlobExists returns true if a blob with given name exists on the specified
|
||||
// container of the storage account.
|
||||
func (b BlobStorageClient) BlobExists(container, name string) (bool, error) {
|
||||
verb := "HEAD"
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
|
||||
headers := b.client.getStandardHeaders()
|
||||
resp, err := b.client.exec(verb, uri, headers, nil)
|
||||
resp, err := b.client.exec(http.MethodHead, uri, headers, nil, b.auth)
|
||||
if resp != nil {
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusOK, nil
|
||||
}
|
||||
|
@ -515,14 +234,15 @@ func (b BlobStorageClient) BlobExists(container, name string) (bool, error) {
|
|||
}
|
||||
|
||||
// GetBlobURL gets the canonical URL to the blob with the specified name in the
|
||||
// specified container. This method does not create a publicly accessible URL if
|
||||
// the blob or container is private and this method does not check if the blob
|
||||
// exists.
|
||||
// specified container. If name is not specified, the canonical URL for the entire
|
||||
// container is obtained.
|
||||
// This method does not create a publicly accessible URL if the blob or container
|
||||
// is private and this method does not check if the blob exists.
|
||||
func (b BlobStorageClient) GetBlobURL(container, name string) string {
|
||||
if container == "" {
|
||||
container = "$root"
|
||||
}
|
||||
return b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
|
||||
return b.client.getEndpoint(blobServiceName, pathForResource(container, name), url.Values{})
|
||||
}
|
||||
|
||||
// GetBlob returns a stream to read the blob. Caller must call Close() the
|
||||
|
@ -558,9 +278,9 @@ func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string, extr
|
|||
}
|
||||
|
||||
func (b BlobStorageClient) getBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (*storageResponse, error) {
|
||||
verb := "GET"
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
|
||||
|
||||
extraHeaders = b.client.protectUserAgent(extraHeaders)
|
||||
headers := b.client.getStandardHeaders()
|
||||
if bytesRange != "" {
|
||||
headers["Range"] = fmt.Sprintf("bytes=%s", bytesRange)
|
||||
|
@ -570,23 +290,23 @@ func (b BlobStorageClient) getBlobRange(container, name, bytesRange string, extr
|
|||
headers[k] = v
|
||||
}
|
||||
|
||||
resp, err := b.client.exec(verb, uri, headers, nil)
|
||||
resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// leasePut is common PUT code for the various aquire/release/break etc functions.
|
||||
// leasePut is common PUT code for the various acquire/release/break etc functions.
|
||||
func (b BlobStorageClient) leaseCommonPut(container string, name string, headers map[string]string, expectedStatus int) (http.Header, error) {
|
||||
params := url.Values{"comp": {"lease"}}
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
|
||||
|
||||
resp, err := b.client.exec("PUT", uri, headers, nil)
|
||||
resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{expectedStatus}); err != nil {
|
||||
return nil, err
|
||||
|
@ -595,27 +315,78 @@ func (b BlobStorageClient) leaseCommonPut(container string, name string, headers
|
|||
return resp.headers, nil
|
||||
}
|
||||
|
||||
// SnapshotBlob creates a snapshot for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx
|
||||
func (b BlobStorageClient) SnapshotBlob(container string, name string, timeout int, extraHeaders map[string]string) (snapshotTimestamp *time.Time, err error) {
|
||||
extraHeaders = b.client.protectUserAgent(extraHeaders)
|
||||
headers := b.client.getStandardHeaders()
|
||||
params := url.Values{"comp": {"snapshot"}}
|
||||
|
||||
if timeout > 0 {
|
||||
params.Add("timeout", strconv.Itoa(timeout))
|
||||
}
|
||||
|
||||
for k, v := range extraHeaders {
|
||||
headers[k] = v
|
||||
}
|
||||
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
|
||||
resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth)
|
||||
if err != nil || resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
snapshotResponse := resp.headers.Get(http.CanonicalHeaderKey("x-ms-snapshot"))
|
||||
if snapshotResponse != "" {
|
||||
snapshotTimestamp, err := time.Parse(time.RFC3339, snapshotResponse)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &snapshotTimestamp, nil
|
||||
}
|
||||
|
||||
return nil, errors.New("Snapshot not created")
|
||||
}
|
||||
|
||||
// AcquireLease creates a lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
|
||||
// returns leaseID acquired
|
||||
// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum
|
||||
// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1.
|
||||
func (b BlobStorageClient) AcquireLease(container string, name string, leaseTimeInSeconds int, proposedLeaseID string) (returnedLeaseID string, err error) {
|
||||
headers := b.client.getStandardHeaders()
|
||||
headers[leaseAction] = acquireLease
|
||||
headers[leaseProposedID] = proposedLeaseID
|
||||
|
||||
if leaseTimeInSeconds == -1 {
|
||||
// Do nothing, but don't trigger the following clauses.
|
||||
} else if leaseTimeInSeconds > 60 || b.client.apiVersion < "2012-02-12" {
|
||||
leaseTimeInSeconds = 60
|
||||
} else if leaseTimeInSeconds < 15 {
|
||||
leaseTimeInSeconds = 15
|
||||
}
|
||||
|
||||
headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds)
|
||||
|
||||
if proposedLeaseID != "" {
|
||||
headers[leaseProposedID] = proposedLeaseID
|
||||
}
|
||||
|
||||
respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusCreated)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(leaseID))
|
||||
returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))
|
||||
|
||||
if returnedLeaseID != "" {
|
||||
return returnedLeaseID, nil
|
||||
}
|
||||
|
||||
// what should we return in case of HTTP 201 but no lease ID?
|
||||
// or it just cant happen? (brave words)
|
||||
return "", errors.New("LeaseID not returned")
|
||||
}
|
||||
|
||||
|
@ -661,7 +432,7 @@ func (b BlobStorageClient) breakLeaseCommon(container string, name string, heade
|
|||
func (b BlobStorageClient) ChangeLease(container string, name string, currentLeaseID string, proposedLeaseID string) (newLeaseID string, err error) {
|
||||
headers := b.client.getStandardHeaders()
|
||||
headers[leaseAction] = changeLease
|
||||
headers[leaseID] = currentLeaseID
|
||||
headers[headerLeaseID] = currentLeaseID
|
||||
headers[leaseProposedID] = proposedLeaseID
|
||||
|
||||
respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusOK)
|
||||
|
@ -669,7 +440,7 @@ func (b BlobStorageClient) ChangeLease(container string, name string, currentLea
|
|||
return "", err
|
||||
}
|
||||
|
||||
newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(leaseID))
|
||||
newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))
|
||||
if newLeaseID != "" {
|
||||
return newLeaseID, nil
|
||||
}
|
||||
|
@ -681,7 +452,7 @@ func (b BlobStorageClient) ChangeLease(container string, name string, currentLea
|
|||
func (b BlobStorageClient) ReleaseLease(container string, name string, currentLeaseID string) error {
|
||||
headers := b.client.getStandardHeaders()
|
||||
headers[leaseAction] = releaseLease
|
||||
headers[leaseID] = currentLeaseID
|
||||
headers[headerLeaseID] = currentLeaseID
|
||||
|
||||
_, err := b.leaseCommonPut(container, name, headers, http.StatusOK)
|
||||
if err != nil {
|
||||
|
@ -695,7 +466,7 @@ func (b BlobStorageClient) ReleaseLease(container string, name string, currentLe
|
|||
func (b BlobStorageClient) RenewLease(container string, name string, currentLeaseID string) error {
|
||||
headers := b.client.getStandardHeaders()
|
||||
headers[leaseAction] = renewLease
|
||||
headers[leaseID] = currentLeaseID
|
||||
headers[headerLeaseID] = currentLeaseID
|
||||
|
||||
_, err := b.leaseCommonPut(container, name, headers, http.StatusOK)
|
||||
if err != nil {
|
||||
|
@ -708,17 +479,16 @@ func (b BlobStorageClient) RenewLease(container string, name string, currentLeas
|
|||
// GetBlobProperties provides various information about the specified
|
||||
// blob. See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx
|
||||
func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobProperties, error) {
|
||||
verb := "HEAD"
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
|
||||
|
||||
headers := b.client.getStandardHeaders()
|
||||
resp, err := b.client.exec(verb, uri, headers, nil)
|
||||
resp, err := b.client.exec(http.MethodHead, uri, headers, nil, b.auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@ -758,6 +528,7 @@ func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobPrope
|
|||
CopyStatus: resp.headers.Get("x-ms-copy-status"),
|
||||
BlobType: BlobType(resp.headers.Get("x-ms-blob-type")),
|
||||
LeaseStatus: resp.headers.Get("x-ms-lease-status"),
|
||||
LeaseState: resp.headers.Get("x-ms-lease-state"),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -780,11 +551,11 @@ func (b BlobStorageClient) SetBlobProperties(container, name string, blobHeaders
|
|||
headers[k] = v
|
||||
}
|
||||
|
||||
resp, err := b.client.exec("PUT", uri, headers, nil)
|
||||
resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
@ -800,6 +571,8 @@ func (b BlobStorageClient) SetBlobProperties(container, name string, blobHeaders
|
|||
func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[string]string, extraHeaders map[string]string) error {
|
||||
params := url.Values{"comp": {"metadata"}}
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
|
||||
metadata = b.client.protectUserAgent(metadata)
|
||||
extraHeaders = b.client.protectUserAgent(extraHeaders)
|
||||
headers := b.client.getStandardHeaders()
|
||||
for k, v := range metadata {
|
||||
headers[userDefinedMetadataHeaderPrefix+k] = v
|
||||
|
@ -809,11 +582,11 @@ func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[
|
|||
headers[k] = v
|
||||
}
|
||||
|
||||
resp, err := b.client.exec("PUT", uri, headers, nil)
|
||||
resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
@ -829,11 +602,11 @@ func (b BlobStorageClient) GetBlobMetadata(container, name string) (map[string]s
|
|||
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
|
||||
headers := b.client.getStandardHeaders()
|
||||
|
||||
resp, err := b.client.exec("GET", uri, headers, nil)
|
||||
resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
return nil, err
|
||||
|
@ -872,7 +645,7 @@ func (b BlobStorageClient) CreateBlockBlob(container, name string) error {
|
|||
// reader. Size must be the number of bytes read from reader. To
|
||||
// create an empty blob, use size==0 and reader==nil.
|
||||
//
|
||||
// The API rejects requests with size > 64 MiB (but this limit is not
|
||||
// The API rejects requests with size > 256 MiB (but this limit is not
|
||||
// checked by the SDK). To write a larger blob, use CreateBlockBlob,
|
||||
// PutBlock, and PutBlockList.
|
||||
//
|
||||
|
@ -880,6 +653,7 @@ func (b BlobStorageClient) CreateBlockBlob(container, name string) error {
|
|||
func (b BlobStorageClient) CreateBlockBlobFromReader(container, name string, size uint64, blob io.Reader, extraHeaders map[string]string) error {
|
||||
path := fmt.Sprintf("%s/%s", container, name)
|
||||
uri := b.client.getEndpoint(blobServiceName, path, url.Values{})
|
||||
extraHeaders = b.client.protectUserAgent(extraHeaders)
|
||||
headers := b.client.getStandardHeaders()
|
||||
headers["x-ms-blob-type"] = string(BlobTypeBlock)
|
||||
headers["Content-Length"] = fmt.Sprintf("%d", size)
|
||||
|
@ -888,18 +662,18 @@ func (b BlobStorageClient) CreateBlockBlobFromReader(container, name string, siz
|
|||
headers[k] = v
|
||||
}
|
||||
|
||||
resp, err := b.client.exec("PUT", uri, headers, blob)
|
||||
resp, err := b.client.exec(http.MethodPut, uri, headers, blob, b.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
// PutBlock saves the given data chunk to the specified block blob with
|
||||
// given ID.
|
||||
//
|
||||
// The API rejects chunks larger than 4 MiB (but this limit is not
|
||||
// The API rejects chunks larger than 100 MB (but this limit is not
|
||||
// checked by the SDK).
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx
|
||||
|
@ -911,12 +685,13 @@ func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byt
|
|||
// the block blob with given ID. It is an alternative to PutBlocks where data
|
||||
// comes as stream but the length is known in advance.
|
||||
//
|
||||
// The API rejects requests with size > 4 MiB (but this limit is not
|
||||
// The API rejects requests with size > 100 MB (but this limit is not
|
||||
// checked by the SDK).
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx
|
||||
func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader, extraHeaders map[string]string) error {
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockID}})
|
||||
extraHeaders = b.client.protectUserAgent(extraHeaders)
|
||||
headers := b.client.getStandardHeaders()
|
||||
headers["x-ms-blob-type"] = string(BlobTypeBlock)
|
||||
headers["Content-Length"] = fmt.Sprintf("%v", size)
|
||||
|
@ -925,12 +700,12 @@ func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, s
|
|||
headers[k] = v
|
||||
}
|
||||
|
||||
resp, err := b.client.exec("PUT", uri, headers, blob)
|
||||
resp, err := b.client.exec(http.MethodPut, uri, headers, blob, b.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
|
@ -944,11 +719,11 @@ func (b BlobStorageClient) PutBlockList(container, name string, blocks []Block)
|
|||
headers := b.client.getStandardHeaders()
|
||||
headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML))
|
||||
|
||||
resp, err := b.client.exec("PUT", uri, headers, strings.NewReader(blockListXML))
|
||||
resp, err := b.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
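
// Sketch of uploading a blob larger than the single-request limit with
// CreateBlockBlob + PutBlock + PutBlockList, as the doc comments above suggest.
// Assumes this package is imported as `storage`, plus "encoding/base64", "fmt"
// and "io"; the Block/BlockStatus shapes used below follow the SDK's block
// list types and are not spelled out in this diff.
func putLargeBlob(blob storage.BlobStorageClient, container, name string, r io.Reader) error {
    if err := blob.CreateBlockBlob(container, name); err != nil {
        return err
    }
    var blocks []storage.Block
    buf := make([]byte, 4*1024*1024) // upload in 4 MiB chunks
    for i := 0; ; i++ {
        n, err := io.ReadFull(r, buf)
        if n > 0 {
            // Block IDs must be base64 encoded and all of equal length.
            id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%08d", i)))
            if perr := blob.PutBlock(container, name, id, buf[:n]); perr != nil {
                return perr
            }
            blocks = append(blocks, storage.Block{ID: id, Status: storage.BlockStatusUncommitted})
        }
        if err == io.EOF || err == io.ErrUnexpectedEOF {
            break
        }
        if err != nil {
            return err
        }
    }
    return blob.PutBlockList(container, name, blocks)
}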
|
||||
|
||||
|
@ -961,7 +736,7 @@ func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockL
|
|||
headers := b.client.getStandardHeaders()
|
||||
|
||||
var out BlockListResponse
|
||||
resp, err := b.client.exec("GET", uri, headers, nil)
|
||||
resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
|
@ -979,6 +754,7 @@ func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockL
|
|||
func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extraHeaders map[string]string) error {
|
||||
path := fmt.Sprintf("%s/%s", container, name)
|
||||
uri := b.client.getEndpoint(blobServiceName, path, url.Values{})
|
||||
extraHeaders = b.client.protectUserAgent(extraHeaders)
|
||||
headers := b.client.getStandardHeaders()
|
||||
headers["x-ms-blob-type"] = string(BlobTypePage)
|
||||
headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", size)
|
||||
|
@ -987,11 +763,11 @@ func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extra
|
|||
headers[k] = v
|
||||
}
|
||||
|
||||
resp, err := b.client.exec("PUT", uri, headers, nil)
|
||||
resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
@ -1004,6 +780,7 @@ func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extra
|
|||
func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte, extraHeaders map[string]string) error {
|
||||
path := fmt.Sprintf("%s/%s", container, name)
|
||||
uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"page"}})
|
||||
extraHeaders = b.client.protectUserAgent(extraHeaders)
|
||||
headers := b.client.getStandardHeaders()
|
||||
headers["x-ms-blob-type"] = string(BlobTypePage)
|
||||
headers["x-ms-page-write"] = string(writeType)
|
||||
|
@ -1022,11 +799,11 @@ func (b BlobStorageClient) PutPage(container, name string, startByte, endByte in
|
|||
}
|
||||
headers["Content-Length"] = fmt.Sprintf("%v", contentLength)
|
||||
|
||||
resp, err := b.client.exec("PUT", uri, headers, data)
|
||||
resp, err := b.client.exec(http.MethodPut, uri, headers, data, b.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
@ -1040,13 +817,13 @@ func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesR
|
|||
headers := b.client.getStandardHeaders()
|
||||
|
||||
var out GetPageRangesResponse
|
||||
resp, err := b.client.exec("GET", uri, headers, nil)
|
||||
resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
return out, err
|
||||
}
|
||||
err = xmlUnmarshal(resp.body, &out)
|
||||
|
@ -1060,6 +837,7 @@ func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesR
|
|||
func (b BlobStorageClient) PutAppendBlob(container, name string, extraHeaders map[string]string) error {
|
||||
path := fmt.Sprintf("%s/%s", container, name)
|
||||
uri := b.client.getEndpoint(blobServiceName, path, url.Values{})
|
||||
extraHeaders = b.client.protectUserAgent(extraHeaders)
|
||||
headers := b.client.getStandardHeaders()
|
||||
headers["x-ms-blob-type"] = string(BlobTypeAppend)
|
||||
|
||||
|
@ -1067,11 +845,11 @@ func (b BlobStorageClient) PutAppendBlob(container, name string, extraHeaders ma
|
|||
headers[k] = v
|
||||
}
|
||||
|
||||
resp, err := b.client.exec("PUT", uri, headers, nil)
|
||||
resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
@ -1082,6 +860,7 @@ func (b BlobStorageClient) PutAppendBlob(container, name string, extraHeaders ma
|
|||
func (b BlobStorageClient) AppendBlock(container, name string, chunk []byte, extraHeaders map[string]string) error {
|
||||
path := fmt.Sprintf("%s/%s", container, name)
|
||||
uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"appendblock"}})
|
||||
extraHeaders = b.client.protectUserAgent(extraHeaders)
|
||||
headers := b.client.getStandardHeaders()
|
||||
headers["x-ms-blob-type"] = string(BlobTypeAppend)
|
||||
headers["Content-Length"] = fmt.Sprintf("%v", len(chunk))
|
||||
|
@ -1090,11 +869,11 @@ func (b BlobStorageClient) AppendBlock(container, name string, chunk []byte, ext
|
|||
headers[k] = v
|
||||
}
|
||||
|
||||
resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk))
|
||||
resp, err := b.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
@ -1106,25 +885,30 @@ func (b BlobStorageClient) AppendBlock(container, name string, chunk []byte, ext
|
|||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx
|
||||
func (b BlobStorageClient) CopyBlob(container, name, sourceBlob string) error {
|
||||
copyID, err := b.startBlobCopy(container, name, sourceBlob)
|
||||
copyID, err := b.StartBlobCopy(container, name, sourceBlob)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return b.waitForBlobCopy(container, name, copyID)
|
||||
return b.WaitForBlobCopy(container, name, copyID)
|
||||
}
|
||||
|
||||
func (b BlobStorageClient) startBlobCopy(container, name, sourceBlob string) (string, error) {
|
||||
// StartBlobCopy starts a blob copy operation.
|
||||
// sourceBlob parameter must be a canonical URL to the blob (can be
|
||||
// obtained using GetBlobURL method.)
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx
|
||||
func (b BlobStorageClient) StartBlobCopy(container, name, sourceBlob string) (string, error) {
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
|
||||
|
||||
headers := b.client.getStandardHeaders()
|
||||
headers["x-ms-copy-source"] = sourceBlob
|
||||
|
||||
resp, err := b.client.exec("PUT", uri, headers, nil)
|
||||
resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil {
|
||||
return "", err
|
||||
|
@ -1137,7 +921,39 @@ func (b BlobStorageClient) startBlobCopy(container, name, sourceBlob string) (st
|
|||
return copyID, nil
|
||||
}
|
||||
|
||||
func (b BlobStorageClient) waitForBlobCopy(container, name, copyID string) error {
|
||||
// AbortBlobCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function.
// copyID is generated from StartBlobCopy function.
// currentLeaseID is required IF the destination blob has an active lease on it.
// As defined in https://msdn.microsoft.com/en-us/library/azure/jj159098.aspx
func (b BlobStorageClient) AbortBlobCopy(container, name, copyID, currentLeaseID string, timeout int) error {
    params := url.Values{"comp": {"copy"}, "copyid": {copyID}}
    if timeout > 0 {
        params.Add("timeout", strconv.Itoa(timeout))
    }

    uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
    headers := b.client.getStandardHeaders()
    headers["x-ms-copy-action"] = "abort"

    if currentLeaseID != "" {
        headers[headerLeaseID] = currentLeaseID
    }

    resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth)
    if err != nil {
        return err
    }
    defer readAndCloseBody(resp.body)

    if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
        return err
    }

    return nil
}
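
// Sketch of driving a copy manually with the newly exported StartBlobCopy /
// AbortBlobCopy / WaitForBlobCopy instead of the blocking CopyBlob helper.
// Assumes this package is imported as `storage` plus a "time" import; the
// container, blob and source URL names are hypothetical.
func copyWithTimeout(blob storage.BlobStorageClient, container, dst, srcURL string) error {
    copyID, err := blob.StartBlobCopy(container, dst, srcURL)
    if err != nil {
        return err
    }
    done := make(chan error, 1)
    go func() { done <- blob.WaitForBlobCopy(container, dst, copyID) }()
    select {
    case err := <-done:
        return err
    case <-time.After(2 * time.Minute):
        // Give up and abort the pending copy (no lease on the destination,
        // no explicit server-side timeout).
        return blob.AbortBlobCopy(container, dst, copyID, "", 0)
    }
}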
|
||||
// WaitForBlobCopy loops until a BlobCopy operation is completed (or fails with error)
|
||||
func (b BlobStorageClient) WaitForBlobCopy(container, name, copyID string) error {
|
||||
for {
|
||||
props, err := b.GetBlobProperties(container, name)
|
||||
if err != nil {
|
||||
|
@ -1171,7 +987,7 @@ func (b BlobStorageClient) DeleteBlob(container, name string, extraHeaders map[s
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
||||
}
|
||||
|
||||
|
@ -1181,27 +997,24 @@ func (b BlobStorageClient) DeleteBlob(container, name string, extraHeaders map[s
|
|||
// See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx
|
||||
func (b BlobStorageClient) DeleteBlobIfExists(container, name string, extraHeaders map[string]string) (bool, error) {
|
||||
resp, err := b.deleteBlob(container, name, extraHeaders)
|
||||
if resp != nil && (resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound) {
|
||||
return resp.statusCode == http.StatusAccepted, nil
|
||||
if resp != nil {
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusAccepted, nil
|
||||
}
|
||||
}
|
||||
defer resp.body.Close()
|
||||
return false, err
|
||||
}
|
||||
|
||||
func (b BlobStorageClient) deleteBlob(container, name string, extraHeaders map[string]string) (*storageResponse, error) {
|
||||
verb := "DELETE"
|
||||
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
|
||||
extraHeaders = b.client.protectUserAgent(extraHeaders)
|
||||
headers := b.client.getStandardHeaders()
|
||||
for k, v := range extraHeaders {
|
||||
headers[k] = v
|
||||
}
|
||||
|
||||
return b.client.exec(verb, uri, headers, nil)
|
||||
}
|
||||
|
||||
// helper method to construct the path to a container given its name
|
||||
func pathForContainer(name string) string {
|
||||
return fmt.Sprintf("/%s", name)
|
||||
return b.client.exec(http.MethodDelete, uri, headers, nil, b.auth)
|
||||
}
|
||||
|
||||
// helper method to construct the path to a blob given its container and blob
|
||||
|
@ -1210,17 +1023,26 @@ func pathForBlob(container, name string) string {
|
|||
return fmt.Sprintf("/%s/%s", container, name)
|
||||
}
|
||||
|
||||
// GetBlobSASURI creates an URL to the specified blob which contains the Shared
|
||||
// Access Signature with specified permissions and expiration time.
|
||||
// helper method to construct the path to either a blob or container
|
||||
func pathForResource(container, name string) string {
|
||||
if len(name) > 0 {
|
||||
return fmt.Sprintf("/%s/%s", container, name)
|
||||
}
|
||||
return fmt.Sprintf("/%s", container)
|
||||
}
|
||||
|
||||
// GetBlobSASURIWithSignedIPAndProtocol creates an URL to the specified blob which contains the Shared
|
||||
// Access Signature with specified permissions and expiration time. Also includes signedIPRange and allowed protocols.
|
||||
// If old API version is used but no signedIP is passed (ie empty string) then this should still work.
// We only populate the signedIP when it is non-empty.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx
|
||||
func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Time, permissions string) (string, error) {
|
||||
func (b BlobStorageClient) GetBlobSASURIWithSignedIPAndProtocol(container, name string, expiry time.Time, permissions string, signedIPRange string, HTTPSOnly bool) (string, error) {
|
||||
var (
|
||||
signedPermissions = permissions
|
||||
blobURL = b.GetBlobURL(container, name)
|
||||
)
|
||||
canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL)
|
||||
|
||||
canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL, b.auth)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -1232,16 +1054,24 @@ func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Tim
|
|||
|
||||
// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
|
||||
canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
|
||||
|
||||
canonicalizedResource, err = url.QueryUnescape(canonicalizedResource)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
signedExpiry := expiry.UTC().Format(time.RFC3339)
|
||||
signedResource := "b"
|
||||
|
||||
stringToSign, err := blobSASStringToSign(b.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions)
|
||||
//If blob name is missing, resource is a container
|
||||
signedResource := "c"
|
||||
if len(name) > 0 {
|
||||
signedResource = "b"
|
||||
}
|
||||
|
||||
protocols := "https,http"
|
||||
if HTTPSOnly {
|
||||
protocols = "https"
|
||||
}
|
||||
stringToSign, err := blobSASStringToSign(b.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions, signedIPRange, protocols)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -1255,6 +1085,13 @@ func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Tim
|
|||
"sig": {sig},
|
||||
}
|
||||
|
||||
if b.client.apiVersion >= "2015-04-05" {
|
||||
sasParams.Add("spr", protocols)
|
||||
if signedIPRange != "" {
|
||||
sasParams.Add("sip", signedIPRange)
|
||||
}
|
||||
}
|
||||
|
||||
sasURL, err := url.Parse(blobURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
@ -1263,16 +1100,31 @@ func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Tim
|
|||
return sasURL.String(), nil
|
||||
}
|
||||
|
||||
func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string) (string, error) {

// GetBlobSASURI creates an URL to the specified blob which contains the Shared
// Access Signature with specified permissions and expiration time.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx
func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Time, permissions string) (string, error) {
    url, err := b.GetBlobSASURIWithSignedIPAndProtocol(container, name, expiry, permissions, "", false)
    return url, err
}

func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string, signedIP string, protocols string) (string, error) {
    var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string

    if signedVersion >= "2015-02-21" {
        canonicalizedResource = "/blob" + canonicalizedResource
    }

    // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
    if signedVersion >= "2015-04-05" {
        return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
    }

    // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
    if signedVersion >= "2013-08-15" {
        return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
    }

    return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
}
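
// Sketch of generating read-only SAS URIs with the new signed-IP/protocol
// variant and the backwards compatible wrapper above. Assumes this package is
// imported as `storage` plus a "time" import; the container, blob and IP range
// are hypothetical.
func sasExamples(blob storage.BlobStorageClient) (plain, restricted string, err error) {
    expiry := time.Now().UTC().Add(4 * time.Hour)
    // Old-style call, now a thin wrapper: any protocol, no IP restriction.
    plain, err = blob.GetBlobSASURI("mycontainer", "myblob.dat", expiry, "r")
    if err != nil {
        return "", "", err
    }
    // New variant: HTTPS only and limited to a single caller IP range.
    restricted, err = blob.GetBlobSASURIWithSignedIPAndProtocol("mycontainer", "myblob.dat", expiry, "r", "168.1.5.60-168.1.5.70", true)
    return plain, restricted, err
}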
@ -0,0 +1,92 @@
package storage

import (
    "fmt"
    "net/http"
    "net/url"
)

// BlobStorageClient contains operations for Microsoft Azure Blob Storage
// Service.
type BlobStorageClient struct {
    client Client
    auth   authentication
}

// GetServiceProperties gets the properties of your storage account's blob service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties
func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) {
    return b.client.getServiceProperties(blobServiceName, b.auth)
}

// SetServiceProperties sets the properties of your storage account's blob service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties
func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error {
    return b.client.setServiceProperties(props, blobServiceName, b.auth)
}

// ListContainersParameters defines the set of customizable parameters to make a
// List Containers call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
type ListContainersParameters struct {
    Prefix     string
    Marker     string
    Include    string
    MaxResults uint
    Timeout    uint
}

// GetContainerReference returns a Container object for the specified container name.
func (b BlobStorageClient) GetContainerReference(name string) Container {
    return Container{
        bsc:  &b,
        Name: name,
    }
}

// ListContainers returns the list of containers in a storage account along with
// pagination token and other response details.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) {
    q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
    uri := b.client.getEndpoint(blobServiceName, "", q)
    headers := b.client.getStandardHeaders()

    var out ContainerListResponse
    resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
    if err != nil {
        return nil, err
    }
    defer resp.body.Close()
    err = xmlUnmarshal(resp.body, &out)

    // assign our client to the newly created Container objects
    for i := range out.Containers {
        out.Containers[i].bsc = &b
    }
    return &out, err
}

func (p ListContainersParameters) getParameters() url.Values {
    out := url.Values{}

    if p.Prefix != "" {
        out.Set("prefix", p.Prefix)
    }
    if p.Marker != "" {
        out.Set("marker", p.Marker)
    }
    if p.Include != "" {
        out.Set("include", p.Include)
    }
    if p.MaxResults != 0 {
        out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
    }
    if p.Timeout != 0 {
        out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
    }

    return out
}
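
// Sketch of paging through all containers with ListContainers, following
// NextMarker until it comes back empty. Assumes this package is imported as
// `storage`; the page size is arbitrary.
func allContainerNames(blob storage.BlobStorageClient) ([]string, error) {
    var names []string
    params := storage.ListContainersParameters{MaxResults: 100}
    for {
        resp, err := blob.ListContainers(params)
        if err != nil {
            return nil, err
        }
        for _, c := range resp.Containers {
            names = append(names, c.Name)
        }
        if resp.NextMarker == "" {
            return names, nil
        }
        params.Marker = resp.NextMarker
    }
}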
|
|
@ -12,20 +12,21 @@ import (
|
|||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"sort"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultBaseURL is the domain name used for storage requests when a
|
||||
// default client is created.
|
||||
// DefaultBaseURL is the domain name used for storage requests in the
|
||||
// public cloud when a default client is created.
|
||||
DefaultBaseURL = "core.windows.net"
|
||||
|
||||
// DefaultAPIVersion is the Azure Storage API version string used when a
|
||||
// DefaultAPIVersion is the Azure Storage API version string used when a
|
||||
// basic client is created.
|
||||
DefaultAPIVersion = "2015-02-21"
|
||||
DefaultAPIVersion = "2016-05-31"
|
||||
|
||||
defaultUseHTTPS = true
|
||||
|
||||
|
@ -43,6 +44,8 @@ const (
|
|||
storageEmulatorBlob = "127.0.0.1:10000"
|
||||
storageEmulatorTable = "127.0.0.1:10002"
|
||||
storageEmulatorQueue = "127.0.0.1:10001"
|
||||
|
||||
userAgentHeader = "User-Agent"
|
||||
)
|
||||
|
||||
// Client is the object that needs to be constructed to perform
|
||||
|
@ -52,11 +55,13 @@ type Client struct {
|
|||
// requests. If it is nil, http.DefaultClient is used.
|
||||
HTTPClient *http.Client
|
||||
|
||||
accountName string
|
||||
accountKey []byte
|
||||
useHTTPS bool
|
||||
baseURL string
|
||||
apiVersion string
|
||||
accountName string
|
||||
accountKey []byte
|
||||
useHTTPS bool
|
||||
UseSharedKeyLite bool
|
||||
baseURL string
|
||||
apiVersion string
|
||||
userAgent string
|
||||
}
|
||||
|
||||
type storageResponse struct {
|
||||
|
@ -130,6 +135,15 @@ func NewBasicClient(accountName, accountKey string) (Client, error) {
|
|||
return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
|
||||
}
|
||||
|
||||
// NewBasicClientOnSovereignCloud constructs a Client with given storage service name and
// key in the referenced cloud.
func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) {
    if accountName == StorageEmulatorAccountName {
        return NewEmulatorClient()
    }
    return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS)
}
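
// Sketch of building a client against a sovereign cloud and opting into
// SharedKeyLite signing before grabbing the blob service. Assumes this package
// is imported as `storage`, that go-autorest's azure package exposes a
// predefined ChinaCloud environment (as in upstream go-autorest), and
// hypothetical credentials.
func chinaCloudBlobService() (storage.BlobStorageClient, error) {
    cli, err := storage.NewBasicClientOnSovereignCloud("myaccount", "bWFzdGVya2V5", azure.ChinaCloud)
    if err != nil {
        return storage.BlobStorageClient{}, err
    }
    cli.UseSharedKeyLite = true // sign requests with SharedKeyLite instead of SharedKey
    return cli.GetBlobService(), nil
}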
|
||||
|
||||
// NewEmulatorClient constructs a Client intended to only work with Azure
// Storage Emulator
|
||||
func NewEmulatorClient() (Client, error) {
|
||||
|
@ -154,13 +168,46 @@ func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, u
|
|||
return c, fmt.Errorf("azure: malformed storage account key: %v", err)
|
||||
}
|
||||
|
||||
return Client{
|
||||
accountName: accountName,
|
||||
accountKey: key,
|
||||
useHTTPS: useHTTPS,
|
||||
baseURL: blobServiceBaseURL,
|
||||
apiVersion: apiVersion,
|
||||
}, nil
|
||||
c = Client{
|
||||
accountName: accountName,
|
||||
accountKey: key,
|
||||
useHTTPS: useHTTPS,
|
||||
baseURL: blobServiceBaseURL,
|
||||
apiVersion: apiVersion,
|
||||
UseSharedKeyLite: false,
|
||||
}
|
||||
c.userAgent = c.getDefaultUserAgent()
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c Client) getDefaultUserAgent() string {
|
||||
return fmt.Sprintf("Go/%s (%s-%s) Azure-SDK-For-Go/%s storage-dataplane/%s",
|
||||
runtime.Version(),
|
||||
runtime.GOARCH,
|
||||
runtime.GOOS,
|
||||
sdkVersion,
|
||||
c.apiVersion,
|
||||
)
|
||||
}
|
||||
|
||||
// AddToUserAgent adds an extension to the current user agent
|
||||
func (c *Client) AddToUserAgent(extension string) error {
|
||||
if extension != "" {
|
||||
c.userAgent = fmt.Sprintf("%s %s", c.userAgent, extension)
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.userAgent)
|
||||
}
|
||||
|
||||
// protectUserAgent is used in funcs that include extraheaders as a parameter.
// It prevents the User-Agent header from being overwritten; if one happens to
// be present, it is appended to the current User-Agent instead. Use it before getStandardHeaders
|
||||
func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]string {
|
||||
if v, ok := extraheaders[userAgentHeader]; ok {
|
||||
c.AddToUserAgent(v)
|
||||
delete(extraheaders, userAgentHeader)
|
||||
}
|
||||
return extraheaders
|
||||
}
|
||||
|
||||
func (c Client) getBaseURL(service string) string {
|
||||
|
@ -212,181 +259,69 @@ func (c Client) getEndpoint(service, path string, params url.Values) string {
|
|||
// GetBlobService returns a BlobStorageClient which can operate on the blob
|
||||
// service of the storage account.
|
||||
func (c Client) GetBlobService() BlobStorageClient {
|
||||
return BlobStorageClient{c}
|
||||
b := BlobStorageClient{
|
||||
client: c,
|
||||
}
|
||||
b.client.AddToUserAgent(blobServiceName)
|
||||
b.auth = sharedKey
|
||||
if c.UseSharedKeyLite {
|
||||
b.auth = sharedKeyLite
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// GetQueueService returns a QueueServiceClient which can operate on the queue
|
||||
// service of the storage account.
|
||||
func (c Client) GetQueueService() QueueServiceClient {
|
||||
return QueueServiceClient{c}
|
||||
q := QueueServiceClient{
|
||||
client: c,
|
||||
}
|
||||
q.client.AddToUserAgent(queueServiceName)
|
||||
q.auth = sharedKey
|
||||
if c.UseSharedKeyLite {
|
||||
q.auth = sharedKeyLite
|
||||
}
|
||||
return q
|
||||
}
|
||||
|
||||
// GetTableService returns a TableServiceClient which can operate on the table
|
||||
// service of the storage account.
|
||||
func (c Client) GetTableService() TableServiceClient {
|
||||
return TableServiceClient{c}
|
||||
t := TableServiceClient{
|
||||
client: c,
|
||||
}
|
||||
t.client.AddToUserAgent(tableServiceName)
|
||||
t.auth = sharedKeyForTable
|
||||
if c.UseSharedKeyLite {
|
||||
t.auth = sharedKeyLiteForTable
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// GetFileService returns a FileServiceClient which can operate on the file
|
||||
// service of the storage account.
|
||||
func (c Client) GetFileService() FileServiceClient {
|
||||
return FileServiceClient{c}
|
||||
}
|
||||
|
||||
func (c Client) createAuthorizationHeader(canonicalizedString string) string {
|
||||
signature := c.computeHmac256(canonicalizedString)
|
||||
return fmt.Sprintf("%s %s:%s", "SharedKey", c.getCanonicalizedAccountName(), signature)
|
||||
}
|
||||
|
||||
func (c Client) getAuthorizationHeader(verb, url string, headers map[string]string) (string, error) {
|
||||
canonicalizedResource, err := c.buildCanonicalizedResource(url)
|
||||
if err != nil {
|
||||
return "", err
|
||||
f := FileServiceClient{
|
||||
client: c,
|
||||
}
|
||||
|
||||
canonicalizedString := c.buildCanonicalizedString(verb, headers, canonicalizedResource)
|
||||
return c.createAuthorizationHeader(canonicalizedString), nil
|
||||
f.client.AddToUserAgent(fileServiceName)
|
||||
f.auth = sharedKey
|
||||
if c.UseSharedKeyLite {
|
||||
f.auth = sharedKeyLite
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
func (c Client) getStandardHeaders() map[string]string {
|
||||
return map[string]string{
|
||||
"x-ms-version": c.apiVersion,
|
||||
"x-ms-date": currentTimeRfc1123Formatted(),
|
||||
userAgentHeader: c.userAgent,
|
||||
"x-ms-version": c.apiVersion,
|
||||
"x-ms-date": currentTimeRfc1123Formatted(),
|
||||
}
|
||||
}
|
||||
|
||||
func (c Client) getCanonicalizedAccountName() string {
|
||||
// since we may be trying to access a secondary storage account, we need to
|
||||
// remove the -secondary part of the storage name
|
||||
return strings.TrimSuffix(c.accountName, "-secondary")
|
||||
}
|
||||
|
||||
func (c Client) buildCanonicalizedHeader(headers map[string]string) string {
|
||||
cm := make(map[string]string)
|
||||
|
||||
for k, v := range headers {
|
||||
headerName := strings.TrimSpace(strings.ToLower(k))
|
||||
match, _ := regexp.MatchString("x-ms-", headerName)
|
||||
if match {
|
||||
cm[headerName] = v
|
||||
}
|
||||
}
|
||||
|
||||
if len(cm) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
keys := make([]string, 0, len(cm))
|
||||
for key := range cm {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
|
||||
sort.Strings(keys)
|
||||
|
||||
ch := ""
|
||||
|
||||
for i, key := range keys {
|
||||
if i == len(keys)-1 {
|
||||
ch += fmt.Sprintf("%s:%s", key, cm[key])
|
||||
} else {
|
||||
ch += fmt.Sprintf("%s:%s\n", key, cm[key])
|
||||
}
|
||||
}
|
||||
return ch
|
||||
}
|
||||
|
||||
func (c Client) buildCanonicalizedResourceTable(uri string) (string, error) {
|
||||
errMsg := "buildCanonicalizedResourceTable error: %s"
|
||||
u, err := url.Parse(uri)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf(errMsg, err.Error())
|
||||
}
|
||||
|
||||
cr := "/" + c.getCanonicalizedAccountName()
|
||||
|
||||
if len(u.Path) > 0 {
|
||||
cr += u.Path
|
||||
}
|
||||
|
||||
return cr, nil
|
||||
}
|
||||
|
||||
func (c Client) buildCanonicalizedResource(uri string) (string, error) {
|
||||
errMsg := "buildCanonicalizedResource error: %s"
|
||||
u, err := url.Parse(uri)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf(errMsg, err.Error())
|
||||
}
|
||||
|
||||
cr := "/" + c.getCanonicalizedAccountName()
|
||||
|
||||
if len(u.Path) > 0 {
|
||||
// Any portion of the CanonicalizedResource string that is derived from
|
||||
// the resource's URI should be encoded exactly as it is in the URI.
|
||||
// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
|
||||
cr += u.EscapedPath()
|
||||
}
|
||||
|
||||
params, err := url.ParseQuery(u.RawQuery)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf(errMsg, err.Error())
|
||||
}
|
||||
|
||||
if len(params) > 0 {
|
||||
cr += "\n"
|
||||
keys := make([]string, 0, len(params))
|
||||
for key := range params {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
|
||||
sort.Strings(keys)
|
||||
|
||||
for i, key := range keys {
|
||||
if len(params[key]) > 1 {
|
||||
sort.Strings(params[key])
|
||||
}
|
||||
|
||||
if i == len(keys)-1 {
|
||||
cr += fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))
|
||||
} else {
|
||||
cr += fmt.Sprintf("%s:%s\n", key, strings.Join(params[key], ","))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return cr, nil
|
||||
}
|
||||
|
||||
func (c Client) buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string) string {
|
||||
contentLength := headers["Content-Length"]
|
||||
if contentLength == "0" {
|
||||
contentLength = ""
|
||||
}
|
||||
canonicalizedString := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s",
|
||||
verb,
|
||||
headers["Content-Encoding"],
|
||||
headers["Content-Language"],
|
||||
contentLength,
|
||||
headers["Content-MD5"],
|
||||
headers["Content-Type"],
|
||||
headers["Date"],
|
||||
headers["If-Modified-Since"],
|
||||
headers["If-Match"],
|
||||
headers["If-None-Match"],
|
||||
headers["If-Unmodified-Since"],
|
||||
headers["Range"],
|
||||
c.buildCanonicalizedHeader(headers),
|
||||
canonicalizedResource)
|
||||
|
||||
return canonicalizedString
|
||||
}
|
||||
|
||||
func (c Client) exec(verb, url string, headers map[string]string, body io.Reader) (*storageResponse, error) {
|
||||
authHeader, err := c.getAuthorizationHeader(verb, url, headers)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
headers["Authorization"] = authHeader
|
||||
func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*storageResponse, error) {
|
||||
headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -422,17 +357,18 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader
|
|||
statusCode := resp.StatusCode
|
||||
if statusCode >= 400 && statusCode <= 505 {
|
||||
var respBody []byte
|
||||
respBody, err = readResponseBody(resp)
|
||||
respBody, err = readAndCloseBody(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
requestID := resp.Header.Get("x-ms-request-id")
|
||||
if len(respBody) == 0 {
|
||||
// no error in response body
|
||||
err = fmt.Errorf("storage: service returned without a response body (%s)", resp.Status)
|
||||
// no error in response body, might happen in HEAD requests
|
||||
err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID)
|
||||
} else {
|
||||
// response contains storage service error object, unmarshal
|
||||
storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, resp.Header.Get("x-ms-request-id"))
|
||||
storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, requestID)
|
||||
if err != nil { // error unmarshaling the error response
|
||||
err = errIn
|
||||
}
|
||||
|
@ -451,7 +387,12 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader
|
|||
body: resp.Body}, nil
|
||||
}
|
||||
|
||||
func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader) (*odataResponse, error) {
|
||||
func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
|
||||
headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(verb, url, body)
|
||||
for k, v := range headers {
|
||||
req.Header.Add(k, v)
|
||||
|
@ -475,14 +416,14 @@ func (c Client) execInternalJSON(verb, url string, headers map[string]string, bo
|
|||
statusCode := resp.StatusCode
|
||||
if statusCode >= 400 && statusCode <= 505 {
|
||||
var respBody []byte
|
||||
respBody, err = readResponseBody(resp)
|
||||
respBody, err = readAndCloseBody(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(respBody) == 0 {
|
||||
// no error in response body
|
||||
err = fmt.Errorf("storage: service returned without a response body (%d)", resp.StatusCode)
|
||||
// no error in response body, might happen in HEAD requests
|
||||
err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, resp.Header.Get("x-ms-request-id"))
|
||||
return respToRet, err
|
||||
}
|
||||
// try unmarshal as odata.error json
|
||||
|
@ -493,31 +434,9 @@ func (c Client) execInternalJSON(verb, url string, headers map[string]string, bo
|
|||
return respToRet, nil
|
||||
}
|
||||
|
||||
func (c Client) createSharedKeyLite(url string, headers map[string]string) (string, error) {
|
||||
can, err := c.buildCanonicalizedResourceTable(url)
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
strToSign := headers["x-ms-date"] + "\n" + can
|
||||
|
||||
hmac := c.computeHmac256(strToSign)
|
||||
return fmt.Sprintf("SharedKeyLite %s:%s", c.accountName, hmac), nil
|
||||
}
|
||||
|
||||
func (c Client) execTable(verb, url string, headers map[string]string, body io.Reader) (*odataResponse, error) {
|
||||
var err error
|
||||
headers["Authorization"], err = c.createSharedKeyLite(url, headers)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return c.execInternalJSON(verb, url, headers, body)
|
||||
}
|
||||
|
||||
func readResponseBody(resp *http.Response) ([]byte, error) {
|
||||
defer resp.Body.Close()
|
||||
out, err := ioutil.ReadAll(resp.Body)
|
||||
func readAndCloseBody(body io.ReadCloser) ([]byte, error) {
|
||||
defer body.Close()
|
||||
out, err := ioutil.ReadAll(body)
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
|
@ -534,6 +453,15 @@ func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStor
|
|||
return storageErr, nil
|
||||
}
|
||||
|
||||
func serviceErrFromStatusCode(code int, status string, requestID string) AzureStorageServiceError {
    return AzureStorageServiceError{
        StatusCode: code,
        Code:       status,
        RequestID:  requestID,
        Message:    "no response body was available for error status code",
    }
}
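
// Sketch of inspecting a failed call: errors returned by exec carry the parsed
// AzureStorageServiceError (or the synthesized one above when the body is
// empty), so callers can branch on the HTTP status. Assumes this package is
// imported as `storage` plus a "net/http" import.
func isNotFound(err error) bool {
    if serr, ok := err.(storage.AzureStorageServiceError); ok {
        return serr.StatusCode == http.StatusNotFound
    }
    return false
}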
|
||||
|
||||
func (e AzureStorageServiceError) Error() string {
|
||||
return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s, QueryParameterName=%s, QueryParameterValue=%s",
|
||||
e.StatusCode, e.Code, e.Message, e.RequestID, e.QueryParameterName, e.QueryParameterValue)
|
|
@ -0,0 +1,376 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Container represents an Azure container.
|
||||
type Container struct {
|
||||
bsc *BlobStorageClient
|
||||
Name string `xml:"Name"`
|
||||
Properties ContainerProperties `xml:"Properties"`
|
||||
}
|
||||
|
||||
func (c *Container) buildPath() string {
|
||||
return fmt.Sprintf("/%s", c.Name)
|
||||
}
|
||||
|
||||
// ContainerProperties contains various properties of a container returned from
|
||||
// various endpoints like ListContainers.
|
||||
type ContainerProperties struct {
|
||||
LastModified string `xml:"Last-Modified"`
|
||||
Etag string `xml:"Etag"`
|
||||
LeaseStatus string `xml:"LeaseStatus"`
|
||||
LeaseState string `xml:"LeaseState"`
|
||||
LeaseDuration string `xml:"LeaseDuration"`
|
||||
}
|
||||
|
||||
// ContainerListResponse contains the response fields from
|
||||
// ListContainers call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
|
||||
type ContainerListResponse struct {
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
Xmlns string `xml:"xmlns,attr"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Marker string `xml:"Marker"`
|
||||
NextMarker string `xml:"NextMarker"`
|
||||
MaxResults int64 `xml:"MaxResults"`
|
||||
Containers []Container `xml:"Containers>Container"`
|
||||
}
|
||||
|
||||
// BlobListResponse contains the response fields from ListBlobs call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
|
||||
type BlobListResponse struct {
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
Xmlns string `xml:"xmlns,attr"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Marker string `xml:"Marker"`
|
||||
NextMarker string `xml:"NextMarker"`
|
||||
MaxResults int64 `xml:"MaxResults"`
|
||||
Blobs []Blob `xml:"Blobs>Blob"`
|
||||
|
||||
// BlobPrefix is used to traverse blobs as if it were a file system.
|
||||
// It is returned if ListBlobsParameters.Delimiter is specified.
|
||||
// The list here can be thought of as "folders" that may contain
|
||||
// other folders or blobs.
|
||||
BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"`
|
||||
|
||||
// Delimiter is used to traverse blobs as if it were a file system.
|
||||
// It is returned if ListBlobsParameters.Delimiter is specified.
|
||||
Delimiter string `xml:"Delimiter"`
|
||||
}
|
||||
|
||||
// ListBlobsParameters defines the set of customizable
|
||||
// parameters to make a List Blobs call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
|
||||
type ListBlobsParameters struct {
|
||||
Prefix string
|
||||
Delimiter string
|
||||
Marker string
|
||||
Include string
|
||||
MaxResults uint
|
||||
Timeout uint
|
||||
}
|
||||
|
||||
func (p ListBlobsParameters) getParameters() url.Values {
|
||||
out := url.Values{}
|
||||
|
||||
if p.Prefix != "" {
|
||||
out.Set("prefix", p.Prefix)
|
||||
}
|
||||
if p.Delimiter != "" {
|
||||
out.Set("delimiter", p.Delimiter)
|
||||
}
|
||||
if p.Marker != "" {
|
||||
out.Set("marker", p.Marker)
|
||||
}
|
||||
if p.Include != "" {
|
||||
out.Set("include", p.Include)
|
||||
}
|
||||
if p.MaxResults != 0 {
|
||||
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
|
||||
}
|
||||
if p.Timeout != 0 {
|
||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// ContainerAccessType defines the access level to the container from a public
|
||||
// request.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms-
|
||||
// blob-public-access" header.
|
||||
type ContainerAccessType string
|
||||
|
||||
// Access options for containers
|
||||
const (
|
||||
ContainerAccessTypePrivate ContainerAccessType = ""
|
||||
ContainerAccessTypeBlob ContainerAccessType = "blob"
|
||||
ContainerAccessTypeContainer ContainerAccessType = "container"
|
||||
)
|
||||
|
||||
// ContainerAccessPolicy represents each access policy in the container ACL.
|
||||
type ContainerAccessPolicy struct {
|
||||
ID string
|
||||
StartTime time.Time
|
||||
ExpiryTime time.Time
|
||||
CanRead bool
|
||||
CanWrite bool
|
||||
CanDelete bool
|
||||
}
|
||||
|
||||
// ContainerPermissions represents the container ACLs.
|
||||
type ContainerPermissions struct {
|
||||
AccessType ContainerAccessType
|
||||
AccessPolicies []ContainerAccessPolicy
|
||||
}
|
||||
|
||||
// ContainerAccessHeader references header used when setting/getting container ACL
|
||||
const (
|
||||
ContainerAccessHeader string = "x-ms-blob-public-access"
|
||||
)
|
||||
|
||||
// Create creates a blob container within the storage account
|
||||
// with given name and access level. Returns error if container already exists.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
|
||||
func (c *Container) Create() error {
|
||||
resp, err := c.create()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
// CreateIfNotExists creates a blob container if it does not exist. Returns
|
||||
// true if container is newly created or false if container already exists.
|
||||
func (c *Container) CreateIfNotExists() (bool, error) {
|
||||
resp, err := c.create()
|
||||
if resp != nil {
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
||||
return resp.statusCode == http.StatusCreated, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
func (c *Container) create() (*storageResponse, error) {
|
||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}})
|
||||
headers := c.bsc.client.getStandardHeaders()
|
||||
return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth)
|
||||
}
|
||||
|
||||
// Exists returns true if a container with given name exists
|
||||
// on the storage account, otherwise returns false.
|
||||
func (c *Container) Exists() (bool, error) {
|
||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}})
|
||||
headers := c.bsc.client.getStandardHeaders()
|
||||
|
||||
resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth)
|
||||
if resp != nil {
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusOK, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// SetPermissions sets up container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179391.aspx
func (c *Container) SetPermissions(permissions ContainerPermissions, timeout int, leaseID string) error {
    params := url.Values{
        "restype": {"container"},
        "comp":    {"acl"},
    }

    if timeout > 0 {
        params.Add("timeout", strconv.Itoa(timeout))
    }

    uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
    headers := c.bsc.client.getStandardHeaders()
    if permissions.AccessType != "" {
        headers[ContainerAccessHeader] = string(permissions.AccessType)
    }

    if leaseID != "" {
        headers[headerLeaseID] = leaseID
    }

    body, length, err := generateContainerACLpayload(permissions.AccessPolicies)
    headers["Content-Length"] = strconv.Itoa(length)

    resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth)
    if err != nil {
        return err
    }
    defer readAndCloseBody(resp.body)

    if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
        return errors.New("Unable to set permissions")
    }

    return nil
}
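
// Sketch of making a container publicly readable at the blob level while
// granting one stored access policy read/write for a day. Assumes this package
// is imported as `storage` plus a "time" import; the container name and policy
// ID are hypothetical.
func publishContainer(blob storage.BlobStorageClient) error {
    cnt := blob.GetContainerReference("mycontainer")
    perms := storage.ContainerPermissions{
        AccessType: storage.ContainerAccessTypeBlob,
        AccessPolicies: []storage.ContainerAccessPolicy{{
            ID:         "uploader-policy",
            StartTime:  time.Now().UTC(),
            ExpiryTime: time.Now().UTC().Add(24 * time.Hour),
            CanRead:    true,
            CanWrite:   true,
        }},
    }
    // No server-side timeout, no lease held on the container.
    return cnt.SetPermissions(perms, 0, "")
}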
|
||||
|
||||
// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx
|
||||
// If timeout is 0 then it will not be passed to Azure
|
||||
// leaseID will only be passed to Azure if populated
|
||||
func (c *Container) GetPermissions(timeout int, leaseID string) (*ContainerPermissions, error) {
|
||||
params := url.Values{
|
||||
"restype": {"container"},
|
||||
"comp": {"acl"},
|
||||
}
|
||||
|
||||
if timeout > 0 {
|
||||
params.Add("timeout", strconv.Itoa(timeout))
|
||||
}
|
||||
|
||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
|
||||
headers := c.bsc.client.getStandardHeaders()
|
||||
|
||||
if leaseID != "" {
|
||||
headers[headerLeaseID] = leaseID
|
||||
}
|
||||
|
||||
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
var ap AccessPolicy
|
||||
err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buildAccessPolicy(ap, &resp.headers), nil
|
||||
}
|
||||
|
||||
func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions {
|
||||
// containerAccess. Blob, Container, empty
|
||||
containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader))
|
||||
permissions := ContainerPermissions{
|
||||
AccessType: ContainerAccessType(containerAccess),
|
||||
AccessPolicies: []ContainerAccessPolicy{},
|
||||
}
|
||||
|
||||
for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
|
||||
capd := ContainerAccessPolicy{
|
||||
ID: policy.ID,
|
||||
StartTime: policy.AccessPolicy.StartTime,
|
||||
ExpiryTime: policy.AccessPolicy.ExpiryTime,
|
||||
}
|
||||
capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
|
||||
capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w")
|
||||
capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")
|
||||
|
||||
permissions.AccessPolicies = append(permissions.AccessPolicies, capd)
|
||||
}
|
||||
return &permissions
|
||||
}
|
||||
|
||||
// Delete deletes the container with given name on the storage
|
||||
// account. If the container does not exist returns error.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
|
||||
func (c *Container) Delete() error {
|
||||
resp, err := c.delete()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
||||
}
|
||||
|
||||
// DeleteIfExists deletes the container with given name on the storage
|
||||
// account if it exists. Returns true if container is deleted with this call, or
|
||||
// false if the container did not exist at the time of the Delete Container
|
||||
// operation.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
|
||||
func (c *Container) DeleteIfExists() (bool, error) {
|
||||
resp, err := c.delete()
|
||||
if resp != nil {
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusAccepted, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
func (c *Container) delete() (*storageResponse, error) {
|
||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}})
|
||||
headers := c.bsc.client.getStandardHeaders()
|
||||
return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth)
|
||||
}
|
||||
|
||||
// ListBlobs returns an object that contains list of blobs in the container,
|
||||
// a pagination token and other information from the List Blobs call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
|
||||
func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) {
|
||||
q := mergeParams(params.getParameters(), url.Values{
|
||||
"restype": {"container"},
|
||||
"comp": {"list"}},
|
||||
)
|
||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
|
||||
headers := c.bsc.client.getStandardHeaders()
|
||||
|
||||
var out BlobListResponse
|
||||
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.body, &out)
|
||||
return out, err
|
||||
}
|
||||
|
||||
func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) {
|
||||
sil := SignedIdentifiers{
|
||||
SignedIdentifiers: []SignedIdentifier{},
|
||||
}
|
||||
for _, capd := range policies {
|
||||
permission := capd.generateContainerPermissions()
|
||||
signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission)
|
||||
sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
|
||||
}
|
||||
return xmlMarshal(sil)
|
||||
}
|
||||
|
||||
func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) {
|
||||
// generate the permissions string (rwd).
|
||||
// still want the end user API to have bool flags.
|
||||
permissions = ""
|
||||
|
||||
if capd.CanRead {
|
||||
permissions += "r"
|
||||
}
|
||||
|
||||
if capd.CanWrite {
|
||||
permissions += "w"
|
||||
}
|
||||
|
||||
if capd.CanDelete {
|
||||
permissions += "d"
|
||||
}
|
||||
|
||||
return permissions
|
||||
}
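A minimal round-trip sketch (it would have to live inside the storage package, since both helpers are unexported): the bool flags above serialize to the compact "rwd" string, and the updatePermissions helper later in this change recovers them.

capd := ContainerAccessPolicy{CanRead: true, CanWrite: true}
perms := capd.generateContainerPermissions() // "rw"
fmt.Println(updatePermissions(perms, "r"))   // true
fmt.Println(updatePermissions(perms, "w"))   // true
fmt.Println(updatePermissions(perms, "d"))   // false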
|
|
@ -0,0 +1,217 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// Directory represents a directory on a share.
|
||||
type Directory struct {
|
||||
fsc *FileServiceClient
|
||||
Metadata map[string]string
|
||||
Name string `xml:"Name"`
|
||||
parent *Directory
|
||||
Properties DirectoryProperties
|
||||
share *Share
|
||||
}
|
||||
|
||||
// DirectoryProperties contains various properties of a directory.
|
||||
type DirectoryProperties struct {
|
||||
LastModified string `xml:"Last-Modified"`
|
||||
Etag string `xml:"Etag"`
|
||||
}
|
||||
|
||||
// ListDirsAndFilesParameters defines the set of customizable parameters to
|
||||
// make a List Files and Directories call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx
|
||||
type ListDirsAndFilesParameters struct {
|
||||
Marker string
|
||||
MaxResults uint
|
||||
Timeout uint
|
||||
}
|
||||
|
||||
// DirsAndFilesListResponse contains the response fields from
|
||||
// a List Files and Directories call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx
|
||||
type DirsAndFilesListResponse struct {
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
Xmlns string `xml:"xmlns,attr"`
|
||||
Marker string `xml:"Marker"`
|
||||
MaxResults int64 `xml:"MaxResults"`
|
||||
Directories []Directory `xml:"Entries>Directory"`
|
||||
Files []File `xml:"Entries>File"`
|
||||
NextMarker string `xml:"NextMarker"`
|
||||
}
|
||||
|
||||
// builds the complete directory path for this directory object.
|
||||
func (d *Directory) buildPath() string {
|
||||
path := ""
|
||||
current := d
|
||||
for current.Name != "" {
|
||||
path = "/" + current.Name + path
|
||||
current = current.parent
|
||||
}
|
||||
return d.share.buildPath() + path
|
||||
}
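For illustration (again inside the storage package, no network calls involved): a directory "logs" nested under "2017" in share "backups" resolves to the path "/backups/2017/logs".

s := &Share{Name: "backups"}
year := s.GetRootDirectoryReference().GetDirectoryReference("2017")
logs := year.GetDirectoryReference("logs")
fmt.Println(logs.buildPath()) // "/backups/2017/logs"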
|
||||
|
||||
// Create this directory in the associated share.
|
||||
// If a directory with the same name already exists, the operation fails.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx
|
||||
func (d *Directory) Create() error {
|
||||
// if this is the root directory exit early
|
||||
if d.parent == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, nil, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.updateEtagAndLastModified(headers)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateIfNotExists creates this directory under the associated share if the
|
||||
// directory does not exist. Returns true if the directory is newly created or
|
||||
// false if the directory already exists.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx
|
||||
func (d *Directory) CreateIfNotExists() (bool, error) {
|
||||
// if this is the root directory exit early
|
||||
if d.parent == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, nil, nil)
|
||||
if resp != nil {
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
||||
if resp.statusCode == http.StatusCreated {
|
||||
d.updateEtagAndLastModified(resp.headers)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, d.FetchAttributes()
|
||||
}
|
||||
}
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Delete removes this directory. It must be empty in order to be deleted.
|
||||
// If the directory does not exist the operation fails.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx
|
||||
func (d *Directory) Delete() error {
|
||||
return d.fsc.deleteResource(d.buildPath(), resourceDirectory)
|
||||
}
|
||||
|
||||
// DeleteIfExists removes this directory if it exists.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx
|
||||
func (d *Directory) DeleteIfExists() (bool, error) {
|
||||
resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory)
|
||||
if resp != nil {
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusAccepted, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Exists returns true if this directory exists.
|
||||
func (d *Directory) Exists() (bool, error) {
|
||||
exists, headers, err := d.fsc.resourceExists(d.buildPath(), resourceDirectory)
|
||||
if exists {
|
||||
d.updateEtagAndLastModified(headers)
|
||||
}
|
||||
return exists, err
|
||||
}
|
||||
|
||||
// FetchAttributes retrieves metadata for this directory.
|
||||
func (d *Directory) FetchAttributes() error {
|
||||
headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, http.MethodHead)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.updateEtagAndLastModified(headers)
|
||||
d.Metadata = getMetadataFromHeaders(headers)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetDirectoryReference returns a child Directory object for this directory.
|
||||
func (d *Directory) GetDirectoryReference(name string) *Directory {
|
||||
return &Directory{
|
||||
fsc: d.fsc,
|
||||
Name: name,
|
||||
parent: d,
|
||||
share: d.share,
|
||||
}
|
||||
}
|
||||
|
||||
// GetFileReference returns a child File object for this directory.
|
||||
func (d *Directory) GetFileReference(name string) *File {
|
||||
return &File{
|
||||
fsc: d.fsc,
|
||||
Name: name,
|
||||
parent: d,
|
||||
share: d.share,
|
||||
}
|
||||
}
|
||||
|
||||
// ListDirsAndFiles returns a list of files and directories under this directory.
|
||||
// It also contains a pagination token and other response details.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx
|
||||
func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) {
|
||||
q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory))
|
||||
|
||||
resp, err := d.fsc.listContent(d.buildPath(), q, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer resp.body.Close()
|
||||
var out DirsAndFilesListResponse
|
||||
err = xmlUnmarshal(resp.body, &out)
|
||||
return &out, err
|
||||
}
|
||||
|
||||
// SetMetadata replaces the metadata for this directory.
|
||||
//
|
||||
// Some keys may be converted to Camel-Case before sending. All keys
|
||||
// are returned in lower case by GetDirectoryMetadata. HTTP header names
|
||||
// are case-insensitive so case munging should not matter to other
|
||||
// applications either.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/mt427370.aspx
|
||||
func (d *Directory) SetMetadata() error {
|
||||
headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.updateEtagAndLastModified(headers)
|
||||
return nil
|
||||
}
|
||||
|
||||
// updates Etag and last modified date
|
||||
func (d *Directory) updateEtagAndLastModified(headers http.Header) {
|
||||
d.Properties.Etag = headers.Get("Etag")
|
||||
d.Properties.LastModified = headers.Get("Last-Modified")
|
||||
}
|
||||
|
||||
// URL gets the canonical URL to this directory.
|
||||
// This method does not create a publicly accessible URL if the directory
|
||||
// is private and this method does not check if the directory exists.
|
||||
func (d *Directory) URL() string {
|
||||
return d.fsc.client.getEndpoint(fileServiceName, d.buildPath(), url.Values{})
|
||||
}
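A caller-side sketch tying the directory operations above together. Obtaining the client (NewBasicClient, GetFileService) is an assumption about the surrounding SDK; the share and directory names are placeholders.

cli, err := NewBasicClient("myaccount", "bXlrZXk=") // assumed constructor
if err != nil {
	log.Fatal(err)
}
fsc := cli.GetFileService() // assumed accessor returning a FileServiceClient
share := fsc.GetShareReference("myshare")
if _, err := share.CreateIfNotExists(); err != nil {
	log.Fatal(err)
}
dir := share.GetRootDirectoryReference().GetDirectoryReference("logs")
if _, err := dir.CreateIfNotExists(); err != nil {
	log.Fatal(err)
}
list, err := dir.ListDirsAndFiles(ListDirsAndFilesParameters{MaxResults: 100})
if err != nil {
	log.Fatal(err)
}
for _, f := range list.Files {
	fmt.Println("file:", f.Name)
}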
|
|
@ -0,0 +1,412 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
const fourMB = uint64(4194304)
|
||||
const oneTB = uint64(1099511627776)
|
||||
|
||||
// File represents a file on a share.
|
||||
type File struct {
|
||||
fsc *FileServiceClient
|
||||
Metadata map[string]string
|
||||
Name string `xml:"Name"`
|
||||
parent *Directory
|
||||
Properties FileProperties `xml:"Properties"`
|
||||
share *Share
|
||||
FileCopyProperties FileCopyState
|
||||
}
|
||||
|
||||
// FileProperties contains various properties of a file.
|
||||
type FileProperties struct {
|
||||
CacheControl string `header:"x-ms-cache-control"`
|
||||
Disposition string `header:"x-ms-content-disposition"`
|
||||
Encoding string `header:"x-ms-content-encoding"`
|
||||
Etag string
|
||||
Language string `header:"x-ms-content-language"`
|
||||
LastModified string
|
||||
Length uint64 `xml:"Content-Length"`
|
||||
MD5 string `header:"x-ms-content-md5"`
|
||||
Type string `header:"x-ms-content-type"`
|
||||
}
|
||||
|
||||
// FileCopyState contains various properties of a file copy operation.
|
||||
type FileCopyState struct {
|
||||
CompletionTime string
|
||||
ID string `header:"x-ms-copy-id"`
|
||||
Progress string
|
||||
Source string
|
||||
Status string `header:"x-ms-copy-status"`
|
||||
StatusDesc string
|
||||
}
|
||||
|
||||
// FileStream contains file data returned from a call to GetFile.
|
||||
type FileStream struct {
|
||||
Body io.ReadCloser
|
||||
ContentMD5 string
|
||||
}
|
||||
|
||||
// FileRequestOptions will be passed to misc file operations.
|
||||
// Currently just Timeout (in seconds) but will expand.
|
||||
type FileRequestOptions struct {
|
||||
Timeout uint // timeout duration in seconds.
|
||||
}
|
||||
|
||||
// getParameters constructs URL parameters for FileRequestOptions.
|
||||
// Currently only timeout is supported, but more options are expected as functionality fills out.
|
||||
func (p FileRequestOptions) getParameters() url.Values {
|
||||
out := url.Values{}
|
||||
|
||||
if p.Timeout != 0 {
|
||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// FileRanges contains a list of file range information for a file.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx
|
||||
type FileRanges struct {
|
||||
ContentLength uint64
|
||||
LastModified string
|
||||
ETag string
|
||||
FileRanges []FileRange `xml:"Range"`
|
||||
}
|
||||
|
||||
// FileRange contains range information for a file.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx
|
||||
type FileRange struct {
|
||||
Start uint64 `xml:"Start"`
|
||||
End uint64 `xml:"End"`
|
||||
}
|
||||
|
||||
func (fr FileRange) String() string {
|
||||
return fmt.Sprintf("bytes=%d-%d", fr.Start, fr.End)
|
||||
}
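For example, the first kilobyte of a file is addressed as the inclusive range below.

fr := FileRange{Start: 0, End: 1023}
fmt.Println(fr.String()) // "bytes=0-1023"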
|
||||
|
||||
// builds the complete file path for this file object
|
||||
func (f *File) buildPath() string {
|
||||
return f.parent.buildPath() + "/" + f.Name
|
||||
}
|
||||
|
||||
// ClearRange releases the specified range of space in a file.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx
|
||||
func (f *File) ClearRange(fileRange FileRange) error {
|
||||
headers, err := f.modifyRange(nil, fileRange, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.updateEtagAndLastModified(headers)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create creates a new file or replaces an existing one.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn194271.aspx
|
||||
func (f *File) Create(maxSize uint64) error {
|
||||
if maxSize > oneTB {
|
||||
return fmt.Errorf("max file size is 1TB")
|
||||
}
|
||||
|
||||
extraHeaders := map[string]string{
|
||||
"x-ms-content-length": strconv.FormatUint(maxSize, 10),
|
||||
"x-ms-type": "file",
|
||||
}
|
||||
|
||||
headers, err := f.fsc.createResource(f.buildPath(), resourceFile, nil, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusCreated})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.Properties.Length = maxSize
|
||||
f.updateEtagAndLastModified(headers)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CopyFile operation copies a file/blob from the sourceURL to the path provided.
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/copy-file
|
||||
func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error {
|
||||
extraHeaders := map[string]string{
|
||||
"x-ms-type": "file",
|
||||
"x-ms-copy-source": sourceURL,
|
||||
}
|
||||
|
||||
var parameters url.Values
|
||||
if options != nil {
|
||||
parameters = options.getParameters()
|
||||
}
|
||||
|
||||
headers, err := f.fsc.createResource(f.buildPath(), resourceFile, parameters, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.updateEtagLastModifiedAndCopyHeaders(headers)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete immediately removes this file from the storage account.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx
|
||||
func (f *File) Delete() error {
|
||||
return f.fsc.deleteResource(f.buildPath(), resourceFile)
|
||||
}
|
||||
|
||||
// DeleteIfExists removes this file if it exists.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx
|
||||
func (f *File) DeleteIfExists() (bool, error) {
|
||||
resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile)
|
||||
if resp != nil {
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusAccepted, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash.
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
|
||||
func (f *File) DownloadRangeToStream(fileRange FileRange, getContentMD5 bool) (fs FileStream, err error) {
|
||||
if getContentMD5 && isRangeTooBig(fileRange) {
|
||||
return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true")
|
||||
}
|
||||
|
||||
extraHeaders := map[string]string{
|
||||
"Range": fileRange.String(),
|
||||
}
|
||||
if getContentMD5 {
|
||||
extraHeaders["x-ms-range-get-content-md5"] = "true"
|
||||
}
|
||||
|
||||
resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, http.MethodGet, extraHeaders)
|
||||
if err != nil {
|
||||
return fs, err
|
||||
}
|
||||
|
||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK, http.StatusPartialContent}); err != nil {
|
||||
resp.body.Close()
|
||||
return fs, err
|
||||
}
|
||||
|
||||
fs.Body = resp.body
|
||||
if getContentMD5 {
|
||||
fs.ContentMD5 = resp.headers.Get("Content-MD5")
|
||||
}
|
||||
return fs, nil
|
||||
}
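Sketch of downloading that same first kilobyte with an MD5 of the returned range; dir is a Directory obtained as in the earlier sketch, and the file name is a placeholder.

f := dir.GetFileReference("data.bin")
stream, err := f.DownloadRangeToStream(FileRange{Start: 0, End: 1023}, true)
if err != nil {
	log.Fatal(err)
}
defer stream.Body.Close()
buf, err := ioutil.ReadAll(stream.Body)
if err != nil {
	log.Fatal(err)
}
fmt.Printf("read %d bytes, Content-MD5 %s\n", len(buf), stream.ContentMD5)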
|
||||
|
||||
// Exists returns true if this file exists.
|
||||
func (f *File) Exists() (bool, error) {
|
||||
exists, headers, err := f.fsc.resourceExists(f.buildPath(), resourceFile)
|
||||
if exists {
|
||||
f.updateEtagAndLastModified(headers)
|
||||
f.updateProperties(headers)
|
||||
}
|
||||
return exists, err
|
||||
}
|
||||
|
||||
// FetchAttributes updates metadata and properties for this file.
|
||||
func (f *File) FetchAttributes() error {
|
||||
headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, http.MethodHead)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.updateEtagAndLastModified(headers)
|
||||
f.updateProperties(headers)
|
||||
f.Metadata = getMetadataFromHeaders(headers)
|
||||
return nil
|
||||
}
|
||||
|
||||
// returns true if the range is larger than 4MB
|
||||
func isRangeTooBig(fileRange FileRange) bool {
|
||||
if fileRange.End-fileRange.Start > fourMB {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// ListRanges returns the list of valid ranges for this file.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx
|
||||
func (f *File) ListRanges(listRange *FileRange) (*FileRanges, error) {
|
||||
params := url.Values{"comp": {"rangelist"}}
|
||||
|
||||
// add optional range to list
|
||||
var headers map[string]string
|
||||
if listRange != nil {
|
||||
headers = make(map[string]string)
|
||||
headers["Range"] = listRange.String()
|
||||
}
|
||||
|
||||
resp, err := f.fsc.listContent(f.buildPath(), params, headers)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer resp.body.Close()
|
||||
var cl uint64
|
||||
cl, err = strconv.ParseUint(resp.headers.Get("x-ms-content-length"), 10, 64)
|
||||
if err != nil {
|
||||
ioutil.ReadAll(resp.body)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var out FileRanges
|
||||
out.ContentLength = cl
|
||||
out.ETag = resp.headers.Get("ETag")
|
||||
out.LastModified = resp.headers.Get("Last-Modified")
|
||||
|
||||
err = xmlUnmarshal(resp.body, &out)
|
||||
return &out, err
|
||||
}
|
||||
|
||||
// modifies a range of bytes in this file
|
||||
func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) (http.Header, error) {
|
||||
if err := f.fsc.checkForStorageEmulator(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if fileRange.End < fileRange.Start {
|
||||
return nil, errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
|
||||
}
|
||||
if bytes != nil && isRangeTooBig(fileRange) {
|
||||
return nil, errors.New("range cannot exceed 4MB in size")
|
||||
}
|
||||
|
||||
uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{"comp": {"range"}})
|
||||
|
||||
// default to clear
|
||||
write := "clear"
|
||||
cl := uint64(0)
|
||||
|
||||
// if bytes is not nil then this is an update operation
|
||||
if bytes != nil {
|
||||
write = "update"
|
||||
cl = (fileRange.End - fileRange.Start) + 1
|
||||
}
|
||||
|
||||
extraHeaders := map[string]string{
|
||||
"Content-Length": strconv.FormatUint(cl, 10),
|
||||
"Range": fileRange.String(),
|
||||
"x-ms-write": write,
|
||||
}
|
||||
|
||||
if contentMD5 != nil {
|
||||
extraHeaders["Content-MD5"] = *contentMD5
|
||||
}
|
||||
|
||||
headers := mergeHeaders(f.fsc.client.getStandardHeaders(), extraHeaders)
|
||||
resp, err := f.fsc.client.exec(http.MethodPut, uri, headers, bytes, f.fsc.auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer readAndCloseBody(resp.body)
|
||||
return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
// SetMetadata replaces the metadata for this file.
|
||||
//
|
||||
// Some keys may be converted to Camel-Case before sending. All keys
|
||||
// are returned in lower case by GetFileMetadata. HTTP header names
|
||||
// are case-insensitive so case munging should not matter to other
|
||||
// applications either.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn689097.aspx
|
||||
func (f *File) SetMetadata() error {
|
||||
headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.updateEtagAndLastModified(headers)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetProperties sets system properties on this file.
|
||||
//
|
||||
// Some keys may be converted to Camel-Case before sending. All keys
|
||||
// are returned in lower case by SetFileProperties. HTTP header names
|
||||
// are case-insensitive so case munging should not matter to other
|
||||
// applications either.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn166975.aspx
|
||||
func (f *File) SetProperties() error {
|
||||
headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.updateEtagAndLastModified(headers)
|
||||
return nil
|
||||
}
|
||||
|
||||
// updates Etag and last modified date
|
||||
func (f *File) updateEtagAndLastModified(headers http.Header) {
|
||||
f.Properties.Etag = headers.Get("Etag")
|
||||
f.Properties.LastModified = headers.Get("Last-Modified")
|
||||
}
|
||||
|
||||
// updates Etag, last modified date and x-ms-copy-id
|
||||
func (f *File) updateEtagLastModifiedAndCopyHeaders(headers http.Header) {
|
||||
f.Properties.Etag = headers.Get("Etag")
|
||||
f.Properties.LastModified = headers.Get("Last-Modified")
|
||||
f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id")
|
||||
f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status")
|
||||
}
|
||||
|
||||
// updates file properties from the specified HTTP header
|
||||
func (f *File) updateProperties(header http.Header) {
|
||||
size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64)
|
||||
if err == nil {
|
||||
f.Properties.Length = size
|
||||
}
|
||||
|
||||
f.updateEtagAndLastModified(header)
|
||||
f.Properties.CacheControl = header.Get("Cache-Control")
|
||||
f.Properties.Disposition = header.Get("Content-Disposition")
|
||||
f.Properties.Encoding = header.Get("Content-Encoding")
|
||||
f.Properties.Language = header.Get("Content-Language")
|
||||
f.Properties.MD5 = header.Get("Content-MD5")
|
||||
f.Properties.Type = header.Get("Content-Type")
|
||||
}
|
||||
|
||||
// URL gets the canonical URL to this file.
|
||||
// This method does not create a publicly accessible URL if the file
|
||||
// is private and this method does not check if the file exists.
|
||||
func (f *File) URL() string {
|
||||
return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{})
|
||||
}
|
||||
|
||||
// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content.
|
||||
// Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with a maximum size of 4MB.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx
|
||||
func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) error {
|
||||
if bytes == nil {
|
||||
return errors.New("bytes cannot be nil")
|
||||
}
|
||||
|
||||
headers, err := f.modifyRange(bytes, fileRange, contentMD5)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.updateEtagAndLastModified(headers)
|
||||
return nil
|
||||
}
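Sketch of creating a file and writing its content as one range. The range is inclusive, so its Content-Length works out to End-Start+1 bytes; dir comes from the earlier directory sketch and the standard library bytes package is assumed to be imported by the caller.

data := []byte("hello, azure file storage")
f := dir.GetFileReference("hello.txt")
if err := f.Create(uint64(len(data))); err != nil { // reserve the final size up front
	log.Fatal(err)
}
r := FileRange{Start: 0, End: uint64(len(data)) - 1} // len(data) bytes, well under the 4MB limit
if err := f.WriteRange(bytes.NewReader(data), r, nil); err != nil { // nil: skip the Content-MD5 check
	log.Fatal(err)
}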
|
|
@ -0,0 +1,375 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// FileServiceClient contains operations for Microsoft Azure File Service.
|
||||
type FileServiceClient struct {
|
||||
client Client
|
||||
auth authentication
|
||||
}
|
||||
|
||||
// ListSharesParameters defines the set of customizable parameters to make a
|
||||
// List Shares call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx
|
||||
type ListSharesParameters struct {
|
||||
Prefix string
|
||||
Marker string
|
||||
Include string
|
||||
MaxResults uint
|
||||
Timeout uint
|
||||
}
|
||||
|
||||
// ShareListResponse contains the response fields from
|
||||
// ListShares call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx
|
||||
type ShareListResponse struct {
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
Xmlns string `xml:"xmlns,attr"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Marker string `xml:"Marker"`
|
||||
NextMarker string `xml:"NextMarker"`
|
||||
MaxResults int64 `xml:"MaxResults"`
|
||||
Shares []Share `xml:"Shares>Share"`
|
||||
}
|
||||
|
||||
type compType string
|
||||
|
||||
const (
|
||||
compNone compType = ""
|
||||
compList compType = "list"
|
||||
compMetadata compType = "metadata"
|
||||
compProperties compType = "properties"
|
||||
compRangeList compType = "rangelist"
|
||||
)
|
||||
|
||||
func (ct compType) String() string {
|
||||
return string(ct)
|
||||
}
|
||||
|
||||
type resourceType string
|
||||
|
||||
const (
|
||||
resourceDirectory resourceType = "directory"
|
||||
resourceFile resourceType = ""
|
||||
resourceShare resourceType = "share"
|
||||
)
|
||||
|
||||
func (rt resourceType) String() string {
|
||||
return string(rt)
|
||||
}
|
||||
|
||||
func (p ListSharesParameters) getParameters() url.Values {
|
||||
out := url.Values{}
|
||||
|
||||
if p.Prefix != "" {
|
||||
out.Set("prefix", p.Prefix)
|
||||
}
|
||||
if p.Marker != "" {
|
||||
out.Set("marker", p.Marker)
|
||||
}
|
||||
if p.Include != "" {
|
||||
out.Set("include", p.Include)
|
||||
}
|
||||
if p.MaxResults != 0 {
|
||||
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
|
||||
}
|
||||
if p.Timeout != 0 {
|
||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func (p ListDirsAndFilesParameters) getParameters() url.Values {
|
||||
out := url.Values{}
|
||||
|
||||
if p.Marker != "" {
|
||||
out.Set("marker", p.Marker)
|
||||
}
|
||||
if p.MaxResults != 0 {
|
||||
out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
|
||||
}
|
||||
if p.Timeout != 0 {
|
||||
out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// returns url.Values for the specified types
|
||||
func getURLInitValues(comp compType, res resourceType) url.Values {
|
||||
values := url.Values{}
|
||||
if comp != compNone {
|
||||
values.Set("comp", comp.String())
|
||||
}
|
||||
if res != resourceFile {
|
||||
values.Set("restype", res.String())
|
||||
}
|
||||
return values
|
||||
}
|
||||
|
||||
// GetShareReference returns a Share object for the specified share name.
|
||||
func (f FileServiceClient) GetShareReference(name string) Share {
|
||||
return Share{
|
||||
fsc: &f,
|
||||
Name: name,
|
||||
Properties: ShareProperties{
|
||||
Quota: -1,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// ListShares returns the list of shares in a storage account along with
|
||||
// pagination token and other response details.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
|
||||
func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) {
|
||||
q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
|
||||
|
||||
var out ShareListResponse
|
||||
resp, err := f.listContent("", q, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
err = xmlUnmarshal(resp.body, &out)
|
||||
|
||||
// assign our client to the newly created Share objects
|
||||
for i := range out.Shares {
|
||||
out.Shares[i].fsc = &f
|
||||
}
|
||||
return &out, err
|
||||
}
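Caller-side sketch for share listing; fsc is the FileServiceClient from the earlier sketches and the prefix is a placeholder.

shares, err := fsc.ListShares(ListSharesParameters{Prefix: "backup", MaxResults: 50})
if err != nil {
	log.Fatal(err)
}
for _, s := range shares.Shares {
	fmt.Println(s.Name, "quota (GB):", s.Properties.Quota)
}
if shares.NextMarker != "" {
	// more pages remain; pass NextMarker back in as Marker to continue
}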
|
||||
|
||||
// GetServiceProperties gets the properties of your storage account's file service.
|
||||
// File service does not support logging
|
||||
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-service-properties
|
||||
func (f *FileServiceClient) GetServiceProperties() (*ServiceProperties, error) {
|
||||
return f.client.getServiceProperties(fileServiceName, f.auth)
|
||||
}
|
||||
|
||||
// SetServiceProperties sets the properties of your storage account's file service.
|
||||
// File service does not support logging
|
||||
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-file-service-properties
|
||||
func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error {
|
||||
return f.client.setServiceProperties(props, fileServiceName, f.auth)
|
||||
}
|
||||
|
||||
// retrieves directory or share content
|
||||
func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*storageResponse, error) {
|
||||
if err := f.checkForStorageEmulator(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
uri := f.client.getEndpoint(fileServiceName, path, params)
|
||||
extraHeaders = f.client.protectUserAgent(extraHeaders)
|
||||
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
|
||||
|
||||
resp, err := f.client.exec(http.MethodGet, uri, headers, nil, f.auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
readAndCloseBody(resp.body)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// returns true if the specified resource exists
|
||||
func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, http.Header, error) {
|
||||
if err := f.checkForStorageEmulator(); err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res))
|
||||
headers := f.client.getStandardHeaders()
|
||||
|
||||
resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth)
|
||||
if resp != nil {
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusOK, resp.headers, nil
|
||||
}
|
||||
}
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
// creates a resource depending on the specified resource type
|
||||
func (f FileServiceClient) createResource(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string, expectedResponseCodes []int) (http.Header, error) {
|
||||
resp, err := f.createResourceNoClose(path, res, urlParams, extraHeaders)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer readAndCloseBody(resp.body)
|
||||
return resp.headers, checkRespCode(resp.statusCode, expectedResponseCodes)
|
||||
}
|
||||
|
||||
// creates a resource depending on the specified resource type, doesn't close the response body
|
||||
func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*storageResponse, error) {
|
||||
if err := f.checkForStorageEmulator(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
values := getURLInitValues(compNone, res)
|
||||
combinedParams := mergeParams(values, urlParams)
|
||||
uri := f.client.getEndpoint(fileServiceName, path, combinedParams)
|
||||
extraHeaders = f.client.protectUserAgent(extraHeaders)
|
||||
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
|
||||
|
||||
return f.client.exec(http.MethodPut, uri, headers, nil, f.auth)
|
||||
}
|
||||
|
||||
// returns HTTP header data for the specified resource
|
||||
func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, verb string) (http.Header, error) {
|
||||
resp, err := f.getResourceNoClose(path, comp, res, verb, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp.headers, nil
|
||||
}
|
||||
|
||||
// gets the specified resource, doesn't close the response body
|
||||
func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, verb string, extraHeaders map[string]string) (*storageResponse, error) {
|
||||
if err := f.checkForStorageEmulator(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
params := getURLInitValues(comp, res)
|
||||
uri := f.client.getEndpoint(fileServiceName, path, params)
|
||||
extraHeaders = f.client.protectUserAgent(extraHeaders)
|
||||
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
|
||||
|
||||
return f.client.exec(verb, uri, headers, nil, f.auth)
|
||||
}
|
||||
|
||||
// deletes the resource and returns an error if the delete was not accepted
|
||||
func (f FileServiceClient) deleteResource(path string, res resourceType) error {
|
||||
resp, err := f.deleteResourceNoClose(path, res)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
||||
}
|
||||
|
||||
// deletes the resource and returns the response, doesn't close the response body
|
||||
func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType) (*storageResponse, error) {
|
||||
if err := f.checkForStorageEmulator(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
values := getURLInitValues(compNone, res)
|
||||
uri := f.client.getEndpoint(fileServiceName, path, values)
|
||||
return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth)
|
||||
}
|
||||
|
||||
// merges metadata into extraHeaders and returns extraHeaders
|
||||
func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string {
|
||||
if metadata == nil && extraHeaders == nil {
|
||||
return nil
|
||||
}
|
||||
if extraHeaders == nil {
|
||||
extraHeaders = make(map[string]string)
|
||||
}
|
||||
for k, v := range metadata {
|
||||
extraHeaders[userDefinedMetadataHeaderPrefix+k] = v
|
||||
}
|
||||
return extraHeaders
|
||||
}
|
||||
|
||||
// merges extraHeaders into headers and returns headers
|
||||
func mergeHeaders(headers, extraHeaders map[string]string) map[string]string {
|
||||
for k, v := range extraHeaders {
|
||||
headers[k] = v
|
||||
}
|
||||
return headers
|
||||
}
|
||||
|
||||
// sets extra header data for the specified resource
|
||||
func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string) (http.Header, error) {
|
||||
if err := f.checkForStorageEmulator(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
params := getURLInitValues(comp, res)
|
||||
uri := f.client.getEndpoint(fileServiceName, path, params)
|
||||
extraHeaders = f.client.protectUserAgent(extraHeaders)
|
||||
headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
|
||||
|
||||
resp, err := f.client.exec(http.MethodPut, uri, headers, nil, f.auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusOK})
|
||||
}
|
||||
|
||||
// gets metadata for the specified resource
|
||||
func (f FileServiceClient) getMetadata(path string, res resourceType) (map[string]string, error) {
|
||||
if err := f.checkForStorageEmulator(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
headers, err := f.getResourceHeaders(path, compMetadata, res, http.MethodGet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return getMetadataFromHeaders(headers), nil
|
||||
}
|
||||
|
||||
// returns a map of custom metadata values from the specified HTTP header
|
||||
func getMetadataFromHeaders(header http.Header) map[string]string {
|
||||
metadata := make(map[string]string)
|
||||
for k, v := range header {
|
||||
// Can't trust CanonicalHeaderKey() to munge case
|
||||
// reliably. "_" is allowed in identifiers:
|
||||
// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
|
||||
// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
|
||||
// http://tools.ietf.org/html/rfc7230#section-3.2
|
||||
// ...but "_" is considered invalid by
|
||||
// CanonicalMIMEHeaderKey in
|
||||
// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
|
||||
// so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar".
|
||||
k = strings.ToLower(k)
|
||||
if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
|
||||
continue
|
||||
}
|
||||
// metadata["foo"] = content of the last X-Ms-Meta-Foo header
|
||||
k = k[len(userDefinedMetadataHeaderPrefix):]
|
||||
metadata[k] = v[len(v)-1]
|
||||
}
|
||||
|
||||
if len(metadata) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return metadata
|
||||
}
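A small round-trip sketch for the two metadata helpers (inside the storage package): user metadata is sent as X-Ms-Meta-* request headers and comes back from response headers with lower-cased keys.

md := map[string]string{"owner": "ops"}
hdrs := mergeMDIntoExtraHeaders(md, nil)
fmt.Println(hdrs) // map[X-Ms-Meta-owner:ops]

resp := http.Header{}
resp.Set("X-Ms-Meta-Owner", "ops")
fmt.Println(getMetadataFromHeaders(resp)) // map[owner:ops]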
|
||||
|
||||
// checkForStorageEmulator determines if the client is set up for use with
|
||||
// the Azure Storage Emulator, and returns a relevant error
|
||||
func (f FileServiceClient) checkForStorageEmulator() error {
|
||||
if f.client.accountName == StorageEmulatorAccountName {
|
||||
return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator")
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,14 @@
|
|||
hash: a97c0c90fe4d23bbd8e5745431f633e75530bb611131b786d76b8e1763bce85e
|
||||
updated: 2017-02-23T09:58:57.3701584-08:00
|
||||
imports:
|
||||
- name: github.com/Azure/go-autorest
|
||||
version: ec5f4903f77ed9927ac95b19ab8e44ada64c1356
|
||||
subpackages:
|
||||
- autorest/azure
|
||||
- autorest
|
||||
- autorest/date
|
||||
- name: github.com/dgrijalva/jwt-go
|
||||
version: 2268707a8f0843315e2004ee4f1d021dc08baedf
|
||||
testImports:
|
||||
- name: gopkg.in/check.v1
|
||||
version: 20d25e2804050c1cd24a7eea1e7a6447dd0e74ec
|
|
@ -0,0 +1,4 @@
|
|||
package: github.com/Azure/azure-sdk-for-go-storage
|
||||
import: []
|
||||
testImport:
|
||||
- package: gopkg.in/check.v1
|
|
@ -15,12 +15,6 @@ const (
|
|||
userDefinedMetadataHeaderPrefix = "X-Ms-Meta-"
|
||||
)
|
||||
|
||||
// QueueServiceClient contains operations for Microsoft Azure Queue Storage
|
||||
// Service.
|
||||
type QueueServiceClient struct {
|
||||
client Client
|
||||
}
|
||||
|
||||
func pathForQueue(queue string) string { return fmt.Sprintf("/%s", queue) }
|
||||
func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) }
|
||||
func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) }
|
||||
|
@ -82,6 +76,24 @@ func (p PeekMessagesParameters) getParameters() url.Values {
|
|||
return out
|
||||
}
|
||||
|
||||
// UpdateMessageParameters is the set of options that can be specified for the Update Message
|
||||
// operation. A zero struct does not use any preferences for the request.
|
||||
type UpdateMessageParameters struct {
|
||||
PopReceipt string
|
||||
VisibilityTimeout int
|
||||
}
|
||||
|
||||
func (p UpdateMessageParameters) getParameters() url.Values {
|
||||
out := url.Values{}
|
||||
if p.PopReceipt != "" {
|
||||
out.Set("popreceipt", p.PopReceipt)
|
||||
}
|
||||
if p.VisibilityTimeout != 0 {
|
||||
out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// GetMessagesResponse represents a response returned from Get Messages
|
||||
// operation.
|
||||
type GetMessagesResponse struct {
|
||||
|
@ -133,16 +145,17 @@ type QueueMetadataResponse struct {
|
|||
// See https://msdn.microsoft.com/en-us/library/azure/dd179348.aspx
|
||||
func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}})
|
||||
metadata = c.client.protectUserAgent(metadata)
|
||||
headers := c.client.getStandardHeaders()
|
||||
for k, v := range metadata {
|
||||
headers[userDefinedMetadataHeaderPrefix+k] = v
|
||||
}
|
||||
|
||||
resp, err := c.client.exec("PUT", uri, headers, nil)
|
||||
resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
@ -161,11 +174,11 @@ func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, err
|
|||
qm.UserDefinedMetadata = make(map[string]string)
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}})
|
||||
headers := c.client.getStandardHeaders()
|
||||
resp, err := c.client.exec("GET", uri, headers, nil)
|
||||
resp, err := c.client.exec(http.MethodGet, uri, headers, nil, c.auth)
|
||||
if err != nil {
|
||||
return qm, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
for k, v := range resp.headers {
|
||||
if len(v) != 1 {
|
||||
|
@ -194,11 +207,11 @@ func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, err
|
|||
func (c QueueServiceClient) CreateQueue(name string) error {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
|
||||
headers := c.client.getStandardHeaders()
|
||||
resp, err := c.client.exec("PUT", uri, headers, nil)
|
||||
resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
|
@ -207,18 +220,18 @@ func (c QueueServiceClient) CreateQueue(name string) error {
|
|||
// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx
|
||||
func (c QueueServiceClient) DeleteQueue(name string) error {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
|
||||
resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil)
|
||||
resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// QueueExists returns true if a queue with given name exists.
|
||||
func (c QueueServiceClient) QueueExists(name string) (bool, error) {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}})
|
||||
resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil)
|
||||
resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
|
||||
if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) {
|
||||
return resp.statusCode == http.StatusOK, nil
|
||||
}
|
||||
|
@ -238,11 +251,11 @@ func (c QueueServiceClient) PutMessage(queue string, message string, params PutM
|
|||
}
|
||||
headers := c.client.getStandardHeaders()
|
||||
headers["Content-Length"] = strconv.Itoa(nn)
|
||||
resp, err := c.client.exec("POST", uri, headers, body)
|
||||
resp, err := c.client.exec(http.MethodPost, uri, headers, body, c.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
|
@ -251,11 +264,11 @@ func (c QueueServiceClient) PutMessage(queue string, message string, params PutM
|
|||
// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx
|
||||
func (c QueueServiceClient) ClearMessages(queue string) error {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{})
|
||||
resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil)
|
||||
resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
|
@ -266,7 +279,7 @@ func (c QueueServiceClient) ClearMessages(queue string) error {
|
|||
func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) {
|
||||
var r GetMessagesResponse
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
|
||||
resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil)
|
||||
resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
|
@ -282,7 +295,7 @@ func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParamete
|
|||
func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) {
|
||||
var r PeekMessagesResponse
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
|
||||
resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil)
|
||||
resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
|
@ -297,10 +310,30 @@ func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParame
|
|||
func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{
|
||||
"popreceipt": {popReceipt}})
|
||||
resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil)
|
||||
resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// UpdateMessage operation updates the specified message.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/hh452234.aspx
|
||||
func (c QueueServiceClient) UpdateMessage(queue string, messageID string, message string, params UpdateMessageParameters) error {
|
||||
uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), params.getParameters())
|
||||
req := putMessageRequest{MessageText: message}
|
||||
body, nn, err := xmlMarshal(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
headers := c.client.getStandardHeaders()
|
||||
headers["Content-Length"] = fmt.Sprintf("%d", nn)
|
||||
resp, err := c.client.exec(http.MethodPut, uri, headers, body, c.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||
}
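Caller-side sketch for the new UpdateMessage operation. The queue client accessor (GetQueueService) and the pop receipt are assumptions; a real pop receipt comes from a prior GetMessages call.

qsc := cli.GetQueueService() // assumed accessor; cli as in the earlier sketches
params := UpdateMessageParameters{
	PopReceipt:        "hypothetical-pop-receipt", // returned by GetMessages in practice
	VisibilityTimeout: 30,                         // keep the message hidden for another 30s
}
if err := qsc.UpdateMessage("jobs", "hypothetical-message-id", "retry later", params); err != nil {
	log.Fatal(err)
}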
|
|
@ -0,0 +1,20 @@
|
|||
package storage
|
||||
|
||||
// QueueServiceClient contains operations for Microsoft Azure Queue Storage
|
||||
// Service.
|
||||
type QueueServiceClient struct {
|
||||
client Client
|
||||
auth authentication
|
||||
}
|
||||
|
||||
// GetServiceProperties gets the properties of your storage account's queue service.
|
||||
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties
|
||||
func (c *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) {
|
||||
return c.client.getServiceProperties(queueServiceName, c.auth)
|
||||
}
|
||||
|
||||
// SetServiceProperties sets the properties of your storage account's queue service.
|
||||
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties
|
||||
func (c *QueueServiceClient) SetServiceProperties(props ServiceProperties) error {
|
||||
return c.client.setServiceProperties(props, queueServiceName, c.auth)
|
||||
}
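Sketch of reading and adjusting the queue service properties through the new wrapper; the retention values are illustrative and qsc is the queue client from the sketch above.

props, err := qsc.GetServiceProperties()
if err != nil {
	log.Fatal(err)
}
days := 7
props.HourMetrics = &Metrics{
	Version:         "1.0",
	Enabled:         true,
	RetentionPolicy: &RetentionPolicy{Enabled: true, Days: &days},
}
if err := qsc.SetServiceProperties(*props); err != nil {
	log.Fatal(err)
}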
|
|
@ -0,0 +1,186 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Share represents an Azure file share.
|
||||
type Share struct {
|
||||
fsc *FileServiceClient
|
||||
Name string `xml:"Name"`
|
||||
Properties ShareProperties `xml:"Properties"`
|
||||
Metadata map[string]string
|
||||
}
|
||||
|
||||
// ShareProperties contains various properties of a share.
|
||||
type ShareProperties struct {
|
||||
LastModified string `xml:"Last-Modified"`
|
||||
Etag string `xml:"Etag"`
|
||||
Quota int `xml:"Quota"`
|
||||
}
|
||||
|
||||
// builds the complete path for this share object.
|
||||
func (s *Share) buildPath() string {
|
||||
return fmt.Sprintf("/%s", s.Name)
|
||||
}
|
||||
|
||||
// Create this share under the associated account.
|
||||
// If a share with the same name already exists, the operation fails.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx
|
||||
func (s *Share) Create() error {
|
||||
headers, err := s.fsc.createResource(s.buildPath(), resourceShare, nil, mergeMDIntoExtraHeaders(s.Metadata, nil), []int{http.StatusCreated})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.updateEtagAndLastModified(headers)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateIfNotExists creates this share under the associated account if
|
||||
// it does not exist. Returns true if the share is newly created or false if
|
||||
// the share already exists.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx
|
||||
func (s *Share) CreateIfNotExists() (bool, error) {
|
||||
resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, nil, nil)
|
||||
if resp != nil {
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
||||
if resp.statusCode == http.StatusCreated {
|
||||
s.updateEtagAndLastModified(resp.headers)
|
||||
return true, nil
|
||||
}
|
||||
return false, s.FetchAttributes()
|
||||
}
|
||||
}
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Delete marks this share for deletion. The share along with any files
|
||||
// and directories contained within it are later deleted during garbage
|
||||
// collection. If the share does not exist, the operation fails.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx
|
||||
func (s *Share) Delete() error {
|
||||
return s.fsc.deleteResource(s.buildPath(), resourceShare)
|
||||
}
|
||||
|
||||
// DeleteIfExists operation marks this share for deletion if it exists.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx
|
||||
func (s *Share) DeleteIfExists() (bool, error) {
|
||||
resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare)
|
||||
if resp != nil {
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusAccepted, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Exists returns true if this share already exists
|
||||
// on the storage account, otherwise returns false.
|
||||
func (s *Share) Exists() (bool, error) {
|
||||
exists, headers, err := s.fsc.resourceExists(s.buildPath(), resourceShare)
|
||||
if exists {
|
||||
s.updateEtagAndLastModified(headers)
|
||||
s.updateQuota(headers)
|
||||
}
|
||||
return exists, err
|
||||
}
|
||||
|
||||
// FetchAttributes retrieves metadata and properties for this share.
|
||||
func (s *Share) FetchAttributes() error {
|
||||
headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, http.MethodHead)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.updateEtagAndLastModified(headers)
|
||||
s.updateQuota(headers)
|
||||
s.Metadata = getMetadataFromHeaders(headers)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetRootDirectoryReference returns a Directory object at the root of this share.
|
||||
func (s *Share) GetRootDirectoryReference() *Directory {
|
||||
return &Directory{
|
||||
fsc: s.fsc,
|
||||
share: s,
|
||||
}
|
||||
}
|
||||
|
||||
// ServiceClient returns the FileServiceClient associated with this share.
|
||||
func (s *Share) ServiceClient() *FileServiceClient {
|
||||
return s.fsc
|
||||
}
|
||||
|
||||
// SetMetadata replaces the metadata for this share.
|
||||
//
|
||||
// Some keys may be converted to Camel-Case before sending. All keys
|
||||
// are returned in lower case by GetShareMetadata. HTTP header names
|
||||
// are case-insensitive so case munging should not matter to other
|
||||
// applications either.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
|
||||
func (s *Share) SetMetadata() error {
|
||||
headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.updateEtagAndLastModified(headers)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetProperties sets system properties for this share.
|
||||
//
|
||||
// Some keys may be converted to Camel-Case before sending. All keys
|
||||
// are returned in lower case by SetShareProperties. HTTP header names
|
||||
// are case-insensitive so case munging should not matter to other
|
||||
// applications either.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/mt427368.aspx
|
||||
func (s *Share) SetProperties() error {
|
||||
if s.Properties.Quota < 1 || s.Properties.Quota > 5120 {
|
||||
return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota)
|
||||
}
|
||||
|
||||
headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, map[string]string{
|
||||
"x-ms-share-quota": strconv.Itoa(s.Properties.Quota),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.updateEtagAndLastModified(headers)
|
||||
return nil
|
||||
}
|
||||
|
||||
// updates Etag and last modified date
|
||||
func (s *Share) updateEtagAndLastModified(headers http.Header) {
|
||||
s.Properties.Etag = headers.Get("Etag")
|
||||
s.Properties.LastModified = headers.Get("Last-Modified")
|
||||
}
|
||||
|
||||
// updates quota value
|
||||
func (s *Share) updateQuota(headers http.Header) {
|
||||
quota, err := strconv.Atoi(headers.Get("x-ms-share-quota"))
|
||||
if err == nil {
|
||||
s.Properties.Quota = quota
|
||||
}
|
||||
}
|
||||
|
||||
// URL gets the canonical URL to this share. This method does not create a publicly accessible
|
||||
// URL if the share is private and this method does not check if the share exists.
|
||||
func (s *Share) URL() string {
|
||||
return s.fsc.client.getEndpoint(fileServiceName, s.buildPath(), url.Values{})
|
||||
}
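End-to-end sketch for the Share type: create it if needed, then set a quota and some metadata; fsc is the FileServiceClient from the earlier sketches.

share := fsc.GetShareReference("backups")
if _, err := share.CreateIfNotExists(); err != nil {
	log.Fatal(err)
}
share.Properties.Quota = 10 // GB, must be in [1, 5120]
if err := share.SetProperties(); err != nil {
	log.Fatal(err)
}
share.Metadata = map[string]string{"team": "infra"}
if err := share.SetMetadata(); err != nil {
	log.Fatal(err)
}
fmt.Println(share.URL())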
|
|
@ -0,0 +1,47 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// AccessPolicyDetailsXML has specifics about an access policy
|
||||
// annotated with XML details.
|
||||
type AccessPolicyDetailsXML struct {
|
||||
StartTime time.Time `xml:"Start"`
|
||||
ExpiryTime time.Time `xml:"Expiry"`
|
||||
Permission string `xml:"Permission"`
|
||||
}
|
||||
|
||||
// SignedIdentifier is a wrapper for a specific policy
|
||||
type SignedIdentifier struct {
|
||||
ID string `xml:"Id"`
|
||||
AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"`
|
||||
}
|
||||
|
||||
// SignedIdentifiers is part of the response from a GetPermissions call.
|
||||
type SignedIdentifiers struct {
|
||||
SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"`
|
||||
}
|
||||
|
||||
// AccessPolicy is the response type from the GetPermissions call.
|
||||
type AccessPolicy struct {
|
||||
SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"`
|
||||
}
|
||||
|
||||
// convertAccessPolicyToXMLStructs converts between AccessPolicyDetails which is a struct better for API usage to the
|
||||
// AccessPolicy struct which will get converted to XML.
|
||||
func convertAccessPolicyToXMLStructs(id string, startTime time.Time, expiryTime time.Time, permissions string) SignedIdentifier {
|
||||
return SignedIdentifier{
|
||||
ID: id,
|
||||
AccessPolicy: AccessPolicyDetailsXML{
|
||||
StartTime: startTime.UTC().Round(time.Second),
|
||||
ExpiryTime: expiryTime.UTC().Round(time.Second),
|
||||
Permission: permissions,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func updatePermissions(permissions, permission string) bool {
|
||||
return strings.Contains(permissions, permission)
|
||||
}
|
|
@ -0,0 +1,118 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// ServiceProperties represents the storage account service properties
|
||||
type ServiceProperties struct {
|
||||
Logging *Logging
|
||||
HourMetrics *Metrics
|
||||
MinuteMetrics *Metrics
|
||||
Cors *Cors
|
||||
}
|
||||
|
||||
// Logging represents the Azure Analytics Logging settings
|
||||
type Logging struct {
|
||||
Version string
|
||||
Delete bool
|
||||
Read bool
|
||||
Write bool
|
||||
RetentionPolicy *RetentionPolicy
|
||||
}
|
||||
|
||||
// RetentionPolicy indicates if retention is enabled and for how many days
|
||||
type RetentionPolicy struct {
|
||||
Enabled bool
|
||||
Days *int
|
||||
}
|
||||
|
||||
// Metrics provide request statistics.
|
||||
type Metrics struct {
|
||||
Version string
|
||||
Enabled bool
|
||||
IncludeAPIs *bool
|
||||
RetentionPolicy *RetentionPolicy
|
||||
}
|
||||
|
||||
// Cors includes all the CORS rules
|
||||
type Cors struct {
|
||||
CorsRule []CorsRule
|
||||
}
|
||||
|
||||
// CorsRule includes all settings for a Cors rule
|
||||
type CorsRule struct {
|
||||
AllowedOrigins string
|
||||
AllowedMethods string
|
||||
MaxAgeInSeconds int
|
||||
ExposedHeaders string
|
||||
AllowedHeaders string
|
||||
}
|
||||
|
||||
func (c Client) getServiceProperties(service string, auth authentication) (*ServiceProperties, error) {
|
||||
query := url.Values{
|
||||
"restype": {"service"},
|
||||
"comp": {"properties"},
|
||||
}
|
||||
uri := c.getEndpoint(service, "", query)
|
||||
headers := c.getStandardHeaders()
|
||||
|
||||
resp, err := c.exec(http.MethodGet, uri, headers, nil, auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var out ServiceProperties
|
||||
err = xmlUnmarshal(resp.body, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func (c Client) setServiceProperties(props ServiceProperties, service string, auth authentication) error {
|
||||
query := url.Values{
|
||||
"restype": {"service"},
|
||||
"comp": {"properties"},
|
||||
}
|
||||
uri := c.getEndpoint(service, "", query)
|
||||
|
||||
// Ideally, StorageServiceProperties would be the output struct
|
||||
// This is to avoid golint stuttering, while generating the correct XML
|
||||
type StorageServiceProperties struct {
|
||||
Logging *Logging
|
||||
HourMetrics *Metrics
|
||||
MinuteMetrics *Metrics
|
||||
Cors *Cors
|
||||
}
|
||||
input := StorageServiceProperties{
|
||||
Logging: props.Logging,
|
||||
HourMetrics: props.HourMetrics,
|
||||
MinuteMetrics: props.MinuteMetrics,
|
||||
Cors: props.Cors,
|
||||
}
|
||||
|
||||
body, length, err := xmlMarshal(input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
headers := c.getStandardHeaders()
|
||||
headers["Content-Length"] = fmt.Sprintf("%v", length)
|
||||
|
||||
resp, err := c.exec(http.MethodPut, uri, headers, body, auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
||||
}
|
|
@ -0,0 +1,254 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// AzureTable is the typedef of the Azure Table name
|
||||
type AzureTable string
|
||||
|
||||
const (
|
||||
tablesURIPath = "/Tables"
|
||||
)
|
||||
|
||||
type createTableRequest struct {
|
||||
TableName string `json:"TableName"`
|
||||
}
|
||||
|
||||
// TableAccessPolicy is used for SETTING table policies
|
||||
type TableAccessPolicy struct {
|
||||
ID string
|
||||
StartTime time.Time
|
||||
ExpiryTime time.Time
|
||||
CanRead bool
|
||||
CanAppend bool
|
||||
CanUpdate bool
|
||||
CanDelete bool
|
||||
}
|
||||
|
||||
func pathForTable(table AzureTable) string { return fmt.Sprintf("%s", table) }
|
||||
|
||||
func (c *TableServiceClient) getStandardHeaders() map[string]string {
|
||||
return map[string]string{
|
||||
"x-ms-version": "2015-02-21",
|
||||
"x-ms-date": currentTimeRfc1123Formatted(),
|
||||
"Accept": "application/json;odata=nometadata",
|
||||
"Accept-Charset": "UTF-8",
|
||||
"Content-Type": "application/json",
|
||||
userAgentHeader: c.client.userAgent,
|
||||
}
|
||||
}
|
||||
|
||||
// QueryTables returns the tables created in the
|
||||
// *TableServiceClient storage account.
|
||||
func (c *TableServiceClient) QueryTables() ([]AzureTable, error) {
|
||||
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
|
||||
|
||||
headers := c.getStandardHeaders()
|
||||
headers["Content-Length"] = "0"
|
||||
|
||||
resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
ioutil.ReadAll(resp.body)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
if _, err := buf.ReadFrom(resp.body); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var respArray queryTablesResponse
|
||||
if err := json.Unmarshal(buf.Bytes(), &respArray); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := make([]AzureTable, len(respArray.TableName))
|
||||
for i, elem := range respArray.TableName {
|
||||
s[i] = AzureTable(elem.TableName)
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// CreateTable creates the table given the specific
|
||||
// name. This function fails if the name is not compliant
|
||||
// with the specification or the table already exists.
|
||||
func (c *TableServiceClient) CreateTable(table AzureTable) error {
|
||||
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
|
||||
|
||||
headers := c.getStandardHeaders()
|
||||
|
||||
req := createTableRequest{TableName: string(table)}
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
if err := json.NewEncoder(buf).Encode(req); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
headers["Content-Length"] = fmt.Sprintf("%d", buf.Len())
|
||||
|
||||
resp, err := c.client.execInternalJSON(http.MethodPost, uri, headers, buf, c.auth)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteTable deletes the table given the specific
|
||||
// name. This function fails if the table is not present.
|
||||
// Be advised: DeleteTable deletes all the entries
|
||||
// that may be present.
|
||||
func (c *TableServiceClient) DeleteTable(table AzureTable) error {
|
||||
uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
|
||||
uri += fmt.Sprintf("('%s')", string(table))
|
||||
|
||||
headers := c.getStandardHeaders()
|
||||
|
||||
headers["Content-Length"] = "0"
|
||||
|
||||
resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
||||
return err
|
||||
|
||||
}
|
||||
return nil
|
||||
}
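// Illustrative sketch (hypothetical helper, not part of the vendored file): a
// create/list/delete round trip with the table client. Assumes c is wired to a
// storage account (see table_serviceclient.go); the table name is made up for
// the example.
func exampleTableLifecycle(c *TableServiceClient) error {
	table := AzureTable("examplelifecycle")
	if err := c.CreateTable(table); err != nil {
		return err
	}
	if tables, err := c.QueryTables(); err != nil {
		return err
	} else if len(tables) == 0 {
		return fmt.Errorf("expected at least one table to be listed")
	}
	// DeleteTable also removes any entities stored in the table.
	return c.DeleteTable(table)
}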
|
||||
|
||||
// SetTablePermissions sets up table ACL permissions as per REST details https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Table-ACL
|
||||
func (c *TableServiceClient) SetTablePermissions(table AzureTable, policies []TableAccessPolicy, timeout uint) (err error) {
|
||||
params := url.Values{"comp": {"acl"}}
|
||||
|
||||
if timeout > 0 {
|
||||
params.Add("timeout", fmt.Sprint(timeout))
|
||||
}
|
||||
|
||||
uri := c.client.getEndpoint(tableServiceName, string(table), params)
|
||||
headers := c.client.getStandardHeaders()
|
||||
|
||||
body, length, err := generateTableACLPayload(policies)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
headers["Content-Length"] = fmt.Sprintf("%v", length)
|
||||
|
||||
resp, err := c.client.execInternalJSON(http.MethodPut, uri, headers, body, c.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) {
|
||||
sil := SignedIdentifiers{
|
||||
SignedIdentifiers: []SignedIdentifier{},
|
||||
}
|
||||
for _, tap := range policies {
|
||||
permission := generateTablePermissions(&tap)
|
||||
signedIdentifier := convertAccessPolicyToXMLStructs(tap.ID, tap.StartTime, tap.ExpiryTime, permission)
|
||||
sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
|
||||
}
|
||||
return xmlMarshal(sil)
|
||||
}
|
||||
|
||||
// GetTablePermissions gets the table ACL permissions, as per REST details https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-acl
|
||||
func (c *TableServiceClient) GetTablePermissions(table AzureTable, timeout int) (permissionResponse []TableAccessPolicy, err error) {
|
||||
params := url.Values{"comp": {"acl"}}
|
||||
|
||||
if timeout > 0 {
|
||||
params.Add("timeout", strconv.Itoa(timeout))
|
||||
}
|
||||
|
||||
uri := c.client.getEndpoint(tableServiceName, string(table), params)
|
||||
headers := c.client.getStandardHeaders()
|
||||
resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
ioutil.ReadAll(resp.body)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var ap AccessPolicy
|
||||
err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out := updateTableAccessPolicy(ap)
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy {
|
||||
out := []TableAccessPolicy{}
|
||||
for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
|
||||
tap := TableAccessPolicy{
|
||||
ID: policy.ID,
|
||||
StartTime: policy.AccessPolicy.StartTime,
|
||||
ExpiryTime: policy.AccessPolicy.ExpiryTime,
|
||||
}
|
||||
tap.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
|
||||
tap.CanAppend = updatePermissions(policy.AccessPolicy.Permission, "a")
|
||||
tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
|
||||
tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")
|
||||
|
||||
out = append(out, tap)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func generateTablePermissions(tap *TableAccessPolicy) (permissions string) {
|
||||
// generate the permissions string (raud).
|
||||
// still want the end user API to have bool flags.
|
||||
permissions = ""
|
||||
|
||||
if tap.CanRead {
|
||||
permissions += "r"
|
||||
}
|
||||
|
||||
if tap.CanAppend {
|
||||
permissions += "a"
|
||||
}
|
||||
|
||||
if tap.CanUpdate {
|
||||
permissions += "u"
|
||||
}
|
||||
|
||||
if tap.CanDelete {
|
||||
permissions += "d"
|
||||
}
|
||||
return permissions
|
||||
}
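// Illustrative sketch (hypothetical helper, not part of the vendored file):
// granting read and append access for a day through a stored access policy.
// The policy ID is an arbitrary label; start and expiry are normalized to UTC
// and rounded to the second by convertAccessPolicyToXMLStructs before being
// serialized.
func exampleGrantReadAppend(c *TableServiceClient, table AzureTable) error {
	now := time.Now()
	policies := []TableAccessPolicy{{
		ID:         "read-append-1d",
		StartTime:  now,
		ExpiryTime: now.Add(24 * time.Hour),
		CanRead:    true,
		CanAppend:  true,
	}}
	// A zero timeout lets the service use its default.
	return c.SetTablePermissions(table, policies, 0)
}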
|
|
@ -10,6 +10,8 @@ import (
|
|||
"reflect"
|
||||
)
|
||||
|
||||
// Annotating as secure for gas scanning
|
||||
/* #nosec */
|
||||
const (
|
||||
partitionKeyNode = "PartitionKey"
|
||||
rowKeyNode = "RowKey"
|
||||
|
@ -96,7 +98,7 @@ func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousCo
|
|||
|
||||
headers["Content-Length"] = "0"
|
||||
|
||||
resp, err := c.client.execTable("GET", uri, headers, nil)
|
||||
resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth)
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
|
@ -104,12 +106,9 @@ func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousCo
|
|||
|
||||
contToken := extractContinuationTokenFromHeaders(resp.headers)
|
||||
|
||||
if err != nil {
|
||||
return nil, contToken, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
return nil, contToken, err
|
||||
}
|
||||
|
||||
|
@ -125,13 +124,12 @@ func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousCo
|
|||
// The function fails if there is an entity with the same
|
||||
// PartitionKey and RowKey in the table.
|
||||
func (c *TableServiceClient) InsertEntity(table AzureTable, entity TableEntity) error {
|
||||
var err error
|
||||
|
||||
if sc, err := c.execTable(table, entity, false, "POST"); err != nil {
|
||||
return checkRespCode(sc, []int{http.StatusCreated})
|
||||
sc, err := c.execTable(table, entity, false, http.MethodPost)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return err
|
||||
return checkRespCode(sc, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, specifyKeysInURL bool, method string) (int, error) {
|
||||
|
@ -150,10 +148,7 @@ func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, spe
|
|||
|
||||
headers["Content-Length"] = fmt.Sprintf("%d", buf.Len())
|
||||
|
||||
var err error
|
||||
var resp *odataResponse
|
||||
|
||||
resp, err = c.client.execTable(method, uri, headers, &buf)
|
||||
resp, err := c.client.execInternalJSON(method, uri, headers, &buf, c.auth)
|
||||
|
||||
if err != nil {
|
||||
return 0, err
|
||||
|
@ -168,12 +163,12 @@ func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, spe
|
|||
// one passed as parameter. The function fails if there is no entity
|
||||
// with the same PartitionKey and RowKey in the table.
|
||||
func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) error {
|
||||
var err error
|
||||
|
||||
if sc, err := c.execTable(table, entity, true, "PUT"); err != nil {
|
||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
||||
sc, err := c.execTable(table, entity, true, http.MethodPut)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
|
||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// MergeEntity merges the contents of an entity with the
|
||||
|
@ -181,12 +176,12 @@ func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity)
|
|||
// The function fails if there is no entity
|
||||
// with the same PartitionKey and RowKey in the table.
|
||||
func (c *TableServiceClient) MergeEntity(table AzureTable, entity TableEntity) error {
|
||||
var err error
|
||||
|
||||
if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil {
|
||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
||||
sc, err := c.execTable(table, entity, true, "MERGE")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
|
||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// DeleteEntityWithoutCheck deletes the entity matching by
|
||||
|
@ -212,7 +207,7 @@ func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity,
|
|||
headers["Content-Length"] = "0"
|
||||
headers["If-Match"] = ifMatch
|
||||
|
||||
resp, err := c.client.execTable("DELETE", uri, headers, nil)
|
||||
resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -229,23 +224,23 @@ func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity,
|
|||
// InsertOrReplaceEntity inserts an entity in the specified table
|
||||
// or replaces the existing one.
|
||||
func (c *TableServiceClient) InsertOrReplaceEntity(table AzureTable, entity TableEntity) error {
|
||||
var err error
|
||||
|
||||
if sc, err := c.execTable(table, entity, true, "PUT"); err != nil {
|
||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
||||
sc, err := c.execTable(table, entity, true, http.MethodPut)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
|
||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// InsertOrMergeEntity inserts an entity in the specified table
|
||||
// or merges the existing one.
|
||||
func (c *TableServiceClient) InsertOrMergeEntity(table AzureTable, entity TableEntity) error {
|
||||
var err error
|
||||
|
||||
if sc, err := c.execTable(table, entity, true, "MERGE"); err != nil {
|
||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
||||
sc, err := c.execTable(table, entity, true, "MERGE")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
|
||||
return checkRespCode(sc, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
func injectPartitionAndRowKeys(entity TableEntity, buf *bytes.Buffer) error {
|
||||
|
@ -338,8 +333,12 @@ func deserializeEntity(retType reflect.Type, reader io.Reader) ([]TableEntity, e
|
|||
}
|
||||
|
||||
// Reset PartitionKey and RowKey
|
||||
tEntries[i].SetPartitionKey(pKey)
|
||||
tEntries[i].SetRowKey(rKey)
|
||||
if err := tEntries[i].SetPartitionKey(pKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := tEntries[i].SetRowKey(rKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return tEntries, nil
|
|
@ -0,0 +1,20 @@
package storage

// TableServiceClient contains operations for Microsoft Azure Table Storage
// Service.
type TableServiceClient struct {
	client Client
	auth   authentication
}

// GetServiceProperties gets the properties of your storage account's table service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties
func (c *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) {
	return c.client.getServiceProperties(tableServiceName, c.auth)
}

// SetServiceProperties sets the properties of your storage account's table service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties
func (c *TableServiceClient) SetServiceProperties(props ServiceProperties) error {
	return c.client.setServiceProperties(props, tableServiceName, c.auth)
}
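// Illustrative sketch (hypothetical helper, not part of the vendored file):
// enabling read/write/delete logging on the table service with a 7 day
// retention window. Assumes c is a configured TableServiceClient; "1.0" is the
// storage analytics schema version commonly used.
func exampleEnableTableLogging(c *TableServiceClient) error {
	days := 7
	props := ServiceProperties{
		Logging: &Logging{
			Version: "1.0",
			Read:    true,
			Write:   true,
			Delete:  true,
			RetentionPolicy: &RetentionPolicy{
				Enabled: true,
				Days:    &days,
			},
		},
	}
	return c.SetServiceProperties(props)
}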
|
|
@ -77,7 +77,7 @@ func headersFromStruct(v interface{}) map[string]string {
|
|||
for i := 0; i < value.NumField(); i++ {
|
||||
key := value.Type().Field(i).Tag.Get("header")
|
||||
val := value.Field(i).String()
|
||||
if val != "" {
|
||||
if key != "" && val != "" {
|
||||
headers[key] = val
|
||||
}
|
||||
}
|
|
@ -0,0 +1,5 @@
package storage

var (
	sdkVersion = "0.1.0"
)
|
|
@ -0,0 +1,191 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright 2015 Microsoft Corporation
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,115 @@
|
|||
/*
|
||||
Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines
|
||||
and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/)
|
||||
generated Go code.
|
||||
|
||||
The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
|
||||
and Responding. A typical pattern is:
|
||||
|
||||
req, err := Prepare(&http.Request{},
|
||||
token.WithAuthorization())
|
||||
|
||||
resp, err := Send(req,
|
||||
WithLogging(logger),
|
||||
DoErrorIfStatusCode(http.StatusInternalServerError),
|
||||
DoCloseIfError(),
|
||||
DoRetryForAttempts(5, time.Second))
|
||||
|
||||
err = Respond(resp,
|
||||
ByDiscardingBody(),
|
||||
ByClosing())
|
||||
|
||||
Each phase relies on decorators to modify and / or manage processing. Decorators may first modify
|
||||
and then pass the data along, pass the data first and then modify the result, or wrap themselves
|
||||
around passing the data (such as a logger might do). Decorators run in the order provided. For
|
||||
example, the following:
|
||||
|
||||
req, err := Prepare(&http.Request{},
|
||||
WithBaseURL("https://microsoft.com/"),
|
||||
WithPath("a"),
|
||||
WithPath("b"),
|
||||
WithPath("c"))
|
||||
|
||||
will set the URL to:
|
||||
|
||||
https://microsoft.com/a/b/c
|
||||
|
||||
Preparers and Responders may be shared and re-used (assuming the underlying decorators support
|
||||
sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
|
||||
shared among multiple go-routines, and a single Sender shared among multiple sending go-routines,
|
||||
all bound together by means of input / output channels.
|
||||
|
||||
Decorators hold their passed state within a closure (such as the path components in the example
|
||||
above). Be careful to share Preparers and Responders only in a context where such held state
|
||||
applies. For example, it may not make sense to share a Preparer that applies a query string from a
|
||||
fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
|
||||
struct (e.g., ByUnmarshallingJson) is likely incorrect.
|
||||
|
||||
Lastly, the Swagger specification (https://swagger.io) that drives AutoRest
|
||||
(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
|
||||
github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure
|
||||
correct parsing and formatting.
|
||||
|
||||
Errors raised by autorest objects and methods will conform to the autorest.Error interface.
|
||||
|
||||
See the included examples for more detail. For details on the suggested use of this package by
|
||||
generated clients, see the Client described below.
|
||||
*/
|
||||
package autorest
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// HeaderLocation specifies the HTTP Location header.
|
||||
HeaderLocation = "Location"
|
||||
|
||||
// HeaderRetryAfter specifies the HTTP Retry-After header.
|
||||
HeaderRetryAfter = "Retry-After"
|
||||
)
|
||||
|
||||
// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set
|
||||
// and false otherwise.
|
||||
func ResponseHasStatusCode(resp *http.Response, codes ...int) bool {
|
||||
return containsInt(codes, resp.StatusCode)
|
||||
}
|
||||
|
||||
// GetLocation retrieves the URL from the Location header of the passed response.
|
||||
func GetLocation(resp *http.Response) string {
|
||||
return resp.Header.Get(HeaderLocation)
|
||||
}
|
||||
|
||||
// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If
|
||||
// the header is absent or is malformed, it will return the supplied default delay time.Duration.
|
||||
func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration {
|
||||
retry := resp.Header.Get(HeaderRetryAfter)
|
||||
if retry == "" {
|
||||
return defaultDelay
|
||||
}
|
||||
|
||||
d, err := time.ParseDuration(retry + "s")
|
||||
if err != nil {
|
||||
return defaultDelay
|
||||
}
|
||||
|
||||
return d
|
||||
}
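// Illustrative sketch (hypothetical helper, not part of the vendored file):
// honouring a throttling response by sleeping for the service-suggested
// interval, falling back to 30 seconds when Retry-After is absent or malformed.
func exampleBackoff(resp *http.Response) {
	time.Sleep(GetRetryAfter(resp, 30*time.Second))
}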
|
||||
|
||||
// NewPollingRequest allocates and returns a new http.Request to poll for the passed response.
|
||||
func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) {
|
||||
location := GetLocation(resp)
|
||||
if location == "" {
|
||||
return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling")
|
||||
}
|
||||
|
||||
req, err := Prepare(&http.Request{Cancel: cancel},
|
||||
AsGet(),
|
||||
WithBaseURL(location))
|
||||
if err != nil {
|
||||
return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location)
|
||||
}
|
||||
|
||||
return req, nil
|
||||
}
|
|
@ -0,0 +1,308 @@
|
|||
package azure
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/date"
|
||||
)
|
||||
|
||||
const (
|
||||
headerAsyncOperation = "Azure-AsyncOperation"
|
||||
)
|
||||
|
||||
const (
|
||||
methodDelete = "DELETE"
|
||||
methodPatch = "PATCH"
|
||||
methodPost = "POST"
|
||||
methodPut = "PUT"
|
||||
methodGet = "GET"
|
||||
|
||||
operationInProgress string = "InProgress"
|
||||
operationCanceled string = "Canceled"
|
||||
operationFailed string = "Failed"
|
||||
operationSucceeded string = "Succeeded"
|
||||
)
|
||||
|
||||
// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure
|
||||
// long-running operation. It will delay between requests for the duration specified in the
|
||||
// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by
|
||||
// closing the optional channel on the http.Request.
|
||||
func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator {
|
||||
return func(s autorest.Sender) autorest.Sender {
|
||||
return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
|
||||
resp, err = s.Do(r)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
pollingCodes := []int{http.StatusAccepted, http.StatusCreated, http.StatusOK}
|
||||
if !autorest.ResponseHasStatusCode(resp, pollingCodes...) {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
ps := pollingState{}
|
||||
for err == nil {
|
||||
err = updatePollingState(resp, &ps)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
if ps.hasTerminated() {
|
||||
if !ps.hasSucceeded() {
|
||||
err = ps
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
r, err = newPollingRequest(resp, ps)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
delay = autorest.GetRetryAfter(resp, delay)
|
||||
resp, err = autorest.SendWithSender(s, r,
|
||||
autorest.AfterDelay(delay))
|
||||
}
|
||||
|
||||
return resp, err
|
||||
})
|
||||
}
|
||||
}
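// Illustrative sketch (hypothetical helper, not part of the vendored file):
// composing DoPollForAsynchronous into a send. Assumes sender is any
// autorest.Sender (an *http.Client typically satisfies the interface) and req
// targets an operation that may answer 200/201/202 with an Azure-AsyncOperation
// or Location header, as handled above. The 30s delay applies between polls
// unless the service supplies a Retry-After value.
func examplePollUntilDone(sender autorest.Sender, req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(sender, req,
		DoPollForAsynchronous(30*time.Second))
}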
|
||||
|
||||
func getAsyncOperation(resp *http.Response) string {
|
||||
return resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation))
|
||||
}
|
||||
|
||||
func hasSucceeded(state string) bool {
|
||||
return state == operationSucceeded
|
||||
}
|
||||
|
||||
func hasTerminated(state string) bool {
|
||||
switch state {
|
||||
case operationCanceled, operationFailed, operationSucceeded:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func hasFailed(state string) bool {
|
||||
return state == operationFailed
|
||||
}
|
||||
|
||||
type provisioningTracker interface {
|
||||
state() string
|
||||
hasSucceeded() bool
|
||||
hasTerminated() bool
|
||||
}
|
||||
|
||||
type operationResource struct {
|
||||
// Note:
|
||||
// The specification states services should return the "id" field. However some return it as
|
||||
// "operationId".
|
||||
ID string `json:"id"`
|
||||
OperationID string `json:"operationId"`
|
||||
Name string `json:"name"`
|
||||
Status string `json:"status"`
|
||||
Properties map[string]interface{} `json:"properties"`
|
||||
OperationError ServiceError `json:"error"`
|
||||
StartTime date.Time `json:"startTime"`
|
||||
EndTime date.Time `json:"endTime"`
|
||||
PercentComplete float64 `json:"percentComplete"`
|
||||
}
|
||||
|
||||
func (or operationResource) state() string {
|
||||
return or.Status
|
||||
}
|
||||
|
||||
func (or operationResource) hasSucceeded() bool {
|
||||
return hasSucceeded(or.state())
|
||||
}
|
||||
|
||||
func (or operationResource) hasTerminated() bool {
|
||||
return hasTerminated(or.state())
|
||||
}
|
||||
|
||||
type provisioningProperties struct {
|
||||
ProvisioningState string `json:"provisioningState"`
|
||||
}
|
||||
|
||||
type provisioningStatus struct {
|
||||
Properties provisioningProperties `json:"properties,omitempty"`
|
||||
ProvisioningError ServiceError `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
func (ps provisioningStatus) state() string {
|
||||
return ps.Properties.ProvisioningState
|
||||
}
|
||||
|
||||
func (ps provisioningStatus) hasSucceeded() bool {
|
||||
return hasSucceeded(ps.state())
|
||||
}
|
||||
|
||||
func (ps provisioningStatus) hasTerminated() bool {
|
||||
return hasTerminated(ps.state())
|
||||
}
|
||||
|
||||
func (ps provisioningStatus) hasProvisioningError() bool {
|
||||
return ps.ProvisioningError != ServiceError{}
|
||||
}
|
||||
|
||||
type pollingResponseFormat string
|
||||
|
||||
const (
|
||||
usesOperationResponse pollingResponseFormat = "OperationResponse"
|
||||
usesProvisioningStatus pollingResponseFormat = "ProvisioningStatus"
|
||||
formatIsUnknown pollingResponseFormat = ""
|
||||
)
|
||||
|
||||
type pollingState struct {
|
||||
responseFormat pollingResponseFormat
|
||||
uri string
|
||||
state string
|
||||
code string
|
||||
message string
|
||||
}
|
||||
|
||||
func (ps pollingState) hasSucceeded() bool {
|
||||
return hasSucceeded(ps.state)
|
||||
}
|
||||
|
||||
func (ps pollingState) hasTerminated() bool {
|
||||
return hasTerminated(ps.state)
|
||||
}
|
||||
|
||||
func (ps pollingState) hasFailed() bool {
|
||||
return hasFailed(ps.state)
|
||||
}
|
||||
|
||||
func (ps pollingState) Error() string {
|
||||
return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q Message=%q", ps.state, ps.code, ps.message)
|
||||
}
|
||||
|
||||
// updatePollingState maps the operation status -- retrieved from either a provisioningState
|
||||
// field, the status field of an OperationResource, or inferred from the HTTP status code --
|
||||
// into a well-known state. Since the process begins with the initial request, the state
|
||||
// always either comes from the provisioningState returned or is inferred from the HTTP
|
||||
// status code. Subsequent requests will read an Azure OperationResource object if the
|
||||
// service initially returned the Azure-AsyncOperation header. The responseFormat field notes
|
||||
// the expected response format.
|
||||
func updatePollingState(resp *http.Response, ps *pollingState) error {
|
||||
// Determine the response shape
|
||||
// -- The first response will always be a provisioningStatus response; only the polling requests,
|
||||
// depending on the header returned, may take a different form.
|
||||
var pt provisioningTracker
|
||||
if ps.responseFormat == usesOperationResponse {
|
||||
pt = &operationResource{}
|
||||
} else {
|
||||
pt = &provisioningStatus{}
|
||||
}
|
||||
|
||||
// If this is the first request (that is, the polling response shape is unknown), determine how
|
||||
// to poll and what to expect
|
||||
if ps.responseFormat == formatIsUnknown {
|
||||
req := resp.Request
|
||||
if req == nil {
|
||||
return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing")
|
||||
}
|
||||
|
||||
// Prefer the Azure-AsyncOperation header
|
||||
ps.uri = getAsyncOperation(resp)
|
||||
if ps.uri != "" {
|
||||
ps.responseFormat = usesOperationResponse
|
||||
} else {
|
||||
ps.responseFormat = usesProvisioningStatus
|
||||
}
|
||||
|
||||
// Else, use the Location header
|
||||
if ps.uri == "" {
|
||||
ps.uri = autorest.GetLocation(resp)
|
||||
}
|
||||
|
||||
// Lastly, for requests against an existing resource, use the last request URI
|
||||
if ps.uri == "" {
|
||||
m := strings.ToUpper(req.Method)
|
||||
if m == methodPatch || m == methodPut || m == methodGet {
|
||||
ps.uri = req.URL.String()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Read and interpret the response (saving the Body in case no polling is necessary)
|
||||
b := &bytes.Buffer{}
|
||||
err := autorest.Respond(resp,
|
||||
autorest.ByCopying(b),
|
||||
autorest.ByUnmarshallingJSON(pt),
|
||||
autorest.ByClosing())
|
||||
resp.Body = ioutil.NopCloser(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Interpret the results
|
||||
// -- Terminal states apply regardless
|
||||
// -- Unknown states are per-service in-progress states
|
||||
// -- Otherwise, infer state from HTTP status code
|
||||
if pt.hasTerminated() {
|
||||
ps.state = pt.state()
|
||||
} else if pt.state() != "" {
|
||||
ps.state = operationInProgress
|
||||
} else {
|
||||
switch resp.StatusCode {
|
||||
case http.StatusAccepted:
|
||||
ps.state = operationInProgress
|
||||
|
||||
case http.StatusNoContent, http.StatusCreated, http.StatusOK:
|
||||
ps.state = operationSucceeded
|
||||
|
||||
default:
|
||||
ps.state = operationFailed
|
||||
}
|
||||
}
|
||||
|
||||
if ps.state == operationInProgress && ps.uri == "" {
|
||||
return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL)
|
||||
}
|
||||
|
||||
// For failed operation, check for error code and message in
|
||||
// -- Operation resource
|
||||
// -- Response
|
||||
// -- Otherwise, Unknown
|
||||
if ps.hasFailed() {
|
||||
if ps.responseFormat == usesOperationResponse {
|
||||
or := pt.(*operationResource)
|
||||
ps.code = or.OperationError.Code
|
||||
ps.message = or.OperationError.Message
|
||||
} else {
|
||||
p := pt.(*provisioningStatus)
|
||||
if p.hasProvisioningError() {
|
||||
ps.code = p.ProvisioningError.Code
|
||||
ps.message = p.ProvisioningError.Message
|
||||
} else {
|
||||
ps.code = "Unknown"
|
||||
ps.message = "None"
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newPollingRequest(resp *http.Response, ps pollingState) (*http.Request, error) {
|
||||
req := resp.Request
|
||||
if req == nil {
|
||||
return nil, autorest.NewError("azure", "newPollingRequest", "Azure Polling Error - Original HTTP request is missing")
|
||||
}
|
||||
|
||||
reqPoll, err := autorest.Prepare(&http.Request{Cancel: req.Cancel},
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(ps.uri))
|
||||
if err != nil {
|
||||
return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.uri)
|
||||
}
|
||||
|
||||
return reqPoll, nil
|
||||
}
|
|
@ -0,0 +1,180 @@
|
|||
/*
|
||||
Package azure provides Azure-specific implementations used with AutoRest.
|
||||
|
||||
See the included examples for more detail.
|
||||
*/
|
||||
package azure
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
)
|
||||
|
||||
const (
|
||||
// HeaderClientID is the Azure extension header to set a user-specified request ID.
|
||||
HeaderClientID = "x-ms-client-request-id"
|
||||
|
||||
// HeaderReturnClientID is the Azure extension header to set if the user-specified request ID
|
||||
// should be included in the response.
|
||||
HeaderReturnClientID = "x-ms-return-client-request-id"
|
||||
|
||||
// HeaderRequestID is the Azure extension header of the service generated request ID returned
|
||||
// in the response.
|
||||
HeaderRequestID = "x-ms-request-id"
|
||||
)
|
||||
|
||||
// ServiceError encapsulates the error response from an Azure service.
|
||||
type ServiceError struct {
|
||||
Code string `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Details *[]interface{} `json:"details"`
|
||||
}
|
||||
|
||||
func (se ServiceError) Error() string {
|
||||
if se.Details != nil {
|
||||
d, err := json.Marshal(*(se.Details))
|
||||
if err != nil {
|
||||
return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, *se.Details)
|
||||
}
|
||||
return fmt.Sprintf("Code=%q Message=%q Details=%v", se.Code, se.Message, string(d))
|
||||
}
|
||||
return fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)
|
||||
}
|
||||
|
||||
// RequestError describes an error response returned by Azure service.
|
||||
type RequestError struct {
|
||||
autorest.DetailedError
|
||||
|
||||
// The error returned by the Azure service.
|
||||
ServiceError *ServiceError `json:"error"`
|
||||
|
||||
// The request id (from the x-ms-request-id-header) of the request.
|
||||
RequestID string
|
||||
}
|
||||
|
||||
// Error returns a human-friendly error message from service error.
|
||||
func (e RequestError) Error() string {
|
||||
return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v",
|
||||
e.StatusCode, e.ServiceError)
|
||||
}
|
||||
|
||||
// IsAzureError returns true if the passed error is an Azure Service error; false otherwise.
|
||||
func IsAzureError(e error) bool {
|
||||
_, ok := e.(*RequestError)
|
||||
return ok
|
||||
}
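// Illustrative sketch (hypothetical helper, not part of the vendored file):
// pulling the service error details and request ID out of a failed call.
// Assumes err was produced by a pipeline using WithErrorUnlessStatusCode below,
// so Azure failures surface as *RequestError.
func exampleDescribeError(err error) string {
	if requestErr, ok := err.(*RequestError); ok && requestErr.ServiceError != nil {
		return fmt.Sprintf("request %s failed: %v", requestErr.RequestID, requestErr.ServiceError)
	}
	if err != nil {
		return err.Error()
	}
	return ""
}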
|
||||
|
||||
// NewErrorWithError creates a new Error conforming object from the
|
||||
// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
|
||||
// if resp is nil), message, and original error. message is treated as a format
|
||||
// string to which the optional args apply.
|
||||
func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError {
|
||||
if v, ok := original.(*RequestError); ok {
|
||||
return *v
|
||||
}
|
||||
|
||||
statusCode := autorest.UndefinedStatusCode
|
||||
if resp != nil {
|
||||
statusCode = resp.StatusCode
|
||||
}
|
||||
return RequestError{
|
||||
DetailedError: autorest.DetailedError{
|
||||
Original: original,
|
||||
PackageType: packageType,
|
||||
Method: method,
|
||||
StatusCode: statusCode,
|
||||
Message: fmt.Sprintf(message, args...),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of
|
||||
// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
|
||||
// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id
|
||||
// header to true such that UUID accompanies the http.Response.
|
||||
func WithReturningClientID(uuid string) autorest.PrepareDecorator {
|
||||
preparer := autorest.CreatePreparer(
|
||||
WithClientID(uuid),
|
||||
WithReturnClientID(true))
|
||||
|
||||
return func(p autorest.Preparer) autorest.Preparer {
|
||||
return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
r, err := p.Prepare(r)
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
return preparer.Prepare(r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// WithClientID returns a PrepareDecorator that adds an HTTP extension header of
|
||||
// x-ms-client-request-id whose value is passed, undecorated UUID (e.g.,
|
||||
// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA").
|
||||
func WithClientID(uuid string) autorest.PrepareDecorator {
|
||||
return autorest.WithHeader(HeaderClientID, uuid)
|
||||
}
|
||||
|
||||
// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of
|
||||
// x-ms-return-client-request-id whose boolean value indicates if the value of the
|
||||
// x-ms-client-request-id header should be included in the http.Response.
|
||||
func WithReturnClientID(b bool) autorest.PrepareDecorator {
|
||||
return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b))
|
||||
}
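// Illustrative sketch (hypothetical helper, not part of the vendored file):
// stamping a request with a caller-chosen client request ID and asking the
// service to echo it back. The UUID is the placeholder quoted in the comments
// above, not a real identifier.
func exampleTagRequest(req *http.Request) (*http.Request, error) {
	return autorest.Prepare(req,
		WithReturningClientID("0F39878C-5F76-4DB8-A25D-61D2C193C3CA"))
}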
|
||||
|
||||
// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the
|
||||
// http.Request sent to the service (and returned in the http.Response)
|
||||
func ExtractClientID(resp *http.Response) string {
|
||||
return autorest.ExtractHeaderValue(HeaderClientID, resp)
|
||||
}
|
||||
|
||||
// ExtractRequestID extracts the Azure server generated request identifier from the
|
||||
// x-ms-request-id header.
|
||||
func ExtractRequestID(resp *http.Response) string {
|
||||
return autorest.ExtractHeaderValue(HeaderRequestID, resp)
|
||||
}
|
||||
|
||||
// WithErrorUnlessStatusCode returns a RespondDecorator that emits an
|
||||
// azure.RequestError by reading the response body unless the response HTTP status code
|
||||
// is among the set passed.
|
||||
//
|
||||
// If there is a chance service may return responses other than the Azure error
|
||||
// format and the response cannot be parsed into an error, a decoding error will
|
||||
// be returned containing the response body. In any case, the Responder will
|
||||
// return an error if the status code is not satisfied.
|
||||
//
|
||||
// If this Responder returns an error, the response body will be replaced with
|
||||
// an in-memory reader, which needs no further closing.
|
||||
func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
|
||||
return func(r autorest.Responder) autorest.Responder {
|
||||
return autorest.ResponderFunc(func(resp *http.Response) error {
|
||||
err := r.Respond(resp)
|
||||
if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) {
|
||||
var e RequestError
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Copy and replace the Body in case it does not contain an error object.
|
||||
// This will leave the Body available to the caller.
|
||||
b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e)
|
||||
resp.Body = ioutil.NopCloser(&b)
|
||||
if decodeErr != nil {
|
||||
return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr)
|
||||
} else if e.ServiceError == nil {
|
||||
e.ServiceError = &ServiceError{Code: "Unknown", Message: "Unknown service error"}
|
||||
}
|
||||
|
||||
e.RequestID = ExtractRequestID(resp)
|
||||
if e.StatusCode == nil {
|
||||
e.StatusCode = resp.StatusCode
|
||||
}
|
||||
err = &e
|
||||
}
|
||||
return err
|
||||
})
|
||||
}
|
||||
}
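// Illustrative sketch (hypothetical helper, not part of the vendored file):
// unmarshalling a successful JSON reply while letting WithErrorUnlessStatusCode
// turn anything else into a *RequestError. target is whatever struct the caller
// expects the body to decode into.
func exampleRespondJSON(resp *http.Response, target interface{}) error {
	return autorest.Respond(resp,
		WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(target),
		autorest.ByClosing())
}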
|
|
@ -0,0 +1,13 @@
package azure

import (
	"net/url"
)

// OAuthConfig represents the endpoints needed
// in OAuth operations
type OAuthConfig struct {
	AuthorizeEndpoint  url.URL
	TokenEndpoint      url.URL
	DeviceCodeEndpoint url.URL
}
|
193
vendor/github.com/Azure/go-autorest/autorest/azure/devicetoken.go
|
@ -0,0 +1,193 @@
|
|||
package azure
|
||||
|
||||
/*
|
||||
This file is largely based on rjw57/oauth2device's code, with the following differences:
|
||||
* scope -> resource, and only allow a single one
|
||||
* receive "Message" in the DeviceCode struct and show it to users as the prompt
|
||||
* azure-xplat-cli has the following behavior that this emulates:
|
||||
- does not send client_secret during the token exchange
|
||||
- sends resource again in the token exchange request
|
||||
*/
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
)
|
||||
|
||||
const (
|
||||
logPrefix = "autorest/azure/devicetoken:"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow
|
||||
ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix)
|
||||
|
||||
// ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow
|
||||
ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix)
|
||||
|
||||
// ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow
|
||||
ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix)
|
||||
|
||||
// ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow
|
||||
ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix)
|
||||
|
||||
// ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
|
||||
ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)
|
||||
|
||||
errCodeSendingFails = "Error occurred while sending request for Device Authorization Code"
|
||||
errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint"
|
||||
errTokenSendingFails = "Error occurred while sending request with device code for a token"
|
||||
errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
|
||||
)
|
||||
|
||||
// DeviceCode is the object returned by the device auth endpoint
|
||||
// It contains information to instruct the user to complete the auth flow
|
||||
type DeviceCode struct {
|
||||
DeviceCode *string `json:"device_code,omitempty"`
|
||||
UserCode *string `json:"user_code,omitempty"`
|
||||
VerificationURL *string `json:"verification_url,omitempty"`
|
||||
ExpiresIn *int64 `json:"expires_in,string,omitempty"`
|
||||
Interval *int64 `json:"interval,string,omitempty"`
|
||||
|
||||
Message *string `json:"message"` // Azure specific
|
||||
Resource string // stored when the flow is initiated, reused when exchanging the device code for a token
|
||||
OAuthConfig OAuthConfig
|
||||
ClientID string
|
||||
}
|
||||
|
||||
// TokenError is the object returned by the token exchange endpoint
|
||||
// when something is amiss
|
||||
type TokenError struct {
|
||||
Error *string `json:"error,omitempty"`
|
||||
ErrorCodes []int `json:"error_codes,omitempty"`
|
||||
ErrorDescription *string `json:"error_description,omitempty"`
|
||||
Timestamp *string `json:"timestamp,omitempty"`
|
||||
TraceID *string `json:"trace_id,omitempty"`
|
||||
}
|
||||
|
||||
// deviceToken is the object returned by the token exchange endpoint
|
||||
// It can either look like a Token or an ErrorToken, so put both here
|
||||
// and check for presence of "Error" to know if we are in error state
|
||||
type deviceToken struct {
|
||||
Token
|
||||
TokenError
|
||||
}
|
||||
|
||||
// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode
|
||||
// that can be used with CheckForUserCompletion or WaitForUserCompletion.
|
||||
func InitiateDeviceAuth(client *autorest.Client, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
|
||||
req, _ := autorest.Prepare(
|
||||
&http.Request{},
|
||||
autorest.AsPost(),
|
||||
autorest.AsFormURLEncoded(),
|
||||
autorest.WithBaseURL(oauthConfig.DeviceCodeEndpoint.String()),
|
||||
autorest.WithFormData(url.Values{
|
||||
"client_id": []string{clientID},
|
||||
"resource": []string{resource},
|
||||
}),
|
||||
)
|
||||
|
||||
resp, err := autorest.SendWithSender(client, req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err)
|
||||
}
|
||||
|
||||
var code DeviceCode
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
autorest.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&code),
|
||||
autorest.ByClosing())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err)
|
||||
}
|
||||
|
||||
code.ClientID = clientID
|
||||
code.Resource = resource
|
||||
code.OAuthConfig = oauthConfig
|
||||
|
||||
return &code, nil
|
||||
}
|
||||
|
||||
// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint
|
||||
// to see if the device flow has: been completed, timed out, or otherwise failed
|
||||
func CheckForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) {
|
||||
req, _ := autorest.Prepare(
|
||||
&http.Request{},
|
||||
autorest.AsPost(),
|
||||
autorest.AsFormURLEncoded(),
|
||||
autorest.WithBaseURL(code.OAuthConfig.TokenEndpoint.String()),
|
||||
autorest.WithFormData(url.Values{
|
||||
"client_id": []string{code.ClientID},
|
||||
"code": []string{*code.DeviceCode},
|
||||
"grant_type": []string{OAuthGrantTypeDeviceCode},
|
||||
"resource": []string{code.Resource},
|
||||
}),
|
||||
)
|
||||
|
||||
resp, err := autorest.SendWithSender(client, req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err)
|
||||
}
|
||||
|
||||
var token deviceToken
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest),
|
||||
autorest.ByUnmarshallingJSON(&token),
|
||||
autorest.ByClosing())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err)
|
||||
}
|
||||
|
||||
if token.Error == nil {
|
||||
return &token.Token, nil
|
||||
}
|
||||
|
||||
switch *token.Error {
|
||||
case "authorization_pending":
|
||||
return nil, ErrDeviceAuthorizationPending
|
||||
case "slow_down":
|
||||
return nil, ErrDeviceSlowDown
|
||||
case "access_denied":
|
||||
return nil, ErrDeviceAccessDenied
|
||||
case "code_expired":
|
||||
return nil, ErrDeviceCodeExpired
|
||||
default:
|
||||
return nil, ErrDeviceGeneric
|
||||
}
|
||||
}
|
||||
|
||||
// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs.
|
||||
// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'.
|
||||
func WaitForUserCompletion(client *autorest.Client, code *DeviceCode) (*Token, error) {
|
||||
intervalDuration := time.Duration(*code.Interval) * time.Second
|
||||
waitDuration := intervalDuration
|
||||
|
||||
for {
|
||||
token, err := CheckForUserCompletion(client, code)
|
||||
|
||||
if err == nil {
|
||||
return token, nil
|
||||
}
|
||||
|
||||
switch err {
|
||||
case ErrDeviceSlowDown:
|
||||
waitDuration += waitDuration
|
||||
case ErrDeviceAuthorizationPending:
|
||||
// noop
|
||||
default: // everything else is "fatal" to us
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if waitDuration > (intervalDuration * 3) {
|
||||
return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix)
|
||||
}
|
||||
|
||||
time.Sleep(waitDuration)
|
||||
}
|
||||
}
|
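The device-code helpers above are used together: InitiateDeviceAuth requests a user code, the returned Message is shown to the user as the prompt, and WaitForUserCompletion polls until the flow finishes or fails. A minimal, illustrative sketch follows; the tenant ID, client ID and resource URI are placeholders, not values used anywhere in this repository.

package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Hypothetical identifiers, for illustration only.
	tenantID := "00000000-0000-0000-0000-000000000000"
	clientID := "11111111-1111-1111-1111-111111111111"
	resource := "https://management.core.windows.net/"

	oauthConfig, err := azure.PublicCloud.OAuthConfigForTenant(tenantID)
	if err != nil {
		log.Fatal(err)
	}

	client := autorest.NewClientWithUserAgent("device-flow-example")

	// Ask the device endpoint for a user code and verification URL.
	code, err := azure.InitiateDeviceAuth(&client, *oauthConfig, clientID, resource)
	if err != nil {
		log.Fatal(err)
	}
	// The service supplies a ready-made, human readable prompt.
	fmt.Println(*code.Message)

	// Poll the token endpoint until the user completes (or the flow fails).
	token, err := azure.WaitForUserCompletion(&client, code)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access token acquired, expires:", token.Expires())
}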
167
vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
generated
vendored
Normal file
|
@ -0,0 +1,167 @@
|
|||
package azure
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
activeDirectoryAPIVersion = "1.0"
|
||||
)
|
||||
|
||||
var environments = map[string]Environment{
|
||||
"AZURECHINACLOUD": ChinaCloud,
|
||||
"AZUREGERMANCLOUD": GermanCloud,
|
||||
"AZUREPUBLICCLOUD": PublicCloud,
|
||||
"AZUREUSGOVERNMENTCLOUD": USGovernmentCloud,
|
||||
}
|
||||
|
||||
// Environment represents a set of endpoints for each of Azure's Clouds.
|
||||
type Environment struct {
|
||||
Name string `json:"name"`
|
||||
ManagementPortalURL string `json:"managementPortalURL"`
|
||||
PublishSettingsURL string `json:"publishSettingsURL"`
|
||||
ServiceManagementEndpoint string `json:"serviceManagementEndpoint"`
|
||||
ResourceManagerEndpoint string `json:"resourceManagerEndpoint"`
|
||||
ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"`
|
||||
GalleryEndpoint string `json:"galleryEndpoint"`
|
||||
KeyVaultEndpoint string `json:"keyVaultEndpoint"`
|
||||
GraphEndpoint string `json:"graphEndpoint"`
|
||||
StorageEndpointSuffix string `json:"storageEndpointSuffix"`
|
||||
SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"`
|
||||
TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"`
|
||||
KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"`
|
||||
ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"`
|
||||
ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"`
|
||||
ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"`
|
||||
ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"`
|
||||
}
|
||||
|
||||
var (
|
||||
// PublicCloud is the default public Azure cloud environment
|
||||
PublicCloud = Environment{
|
||||
Name: "AzurePublicCloud",
|
||||
ManagementPortalURL: "https://manage.windowsazure.com/",
|
||||
PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index",
|
||||
ServiceManagementEndpoint: "https://management.core.windows.net/",
|
||||
ResourceManagerEndpoint: "https://management.azure.com/",
|
||||
ActiveDirectoryEndpoint: "https://login.microsoftonline.com/",
|
||||
GalleryEndpoint: "https://gallery.azure.com/",
|
||||
KeyVaultEndpoint: "https://vault.azure.net/",
|
||||
GraphEndpoint: "https://graph.windows.net/",
|
||||
StorageEndpointSuffix: "core.windows.net",
|
||||
SQLDatabaseDNSSuffix: "database.windows.net",
|
||||
TrafficManagerDNSSuffix: "trafficmanager.net",
|
||||
KeyVaultDNSSuffix: "vault.azure.net",
|
||||
ServiceBusEndpointSuffix: "servicebus.azure.com",
|
||||
ServiceManagementVMDNSSuffix: "cloudapp.net",
|
||||
ResourceManagerVMDNSSuffix: "cloudapp.azure.com",
|
||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
||||
}
|
||||
|
||||
// USGovernmentCloud is the cloud environment for the US Government
|
||||
USGovernmentCloud = Environment{
|
||||
Name: "AzureUSGovernmentCloud",
|
||||
ManagementPortalURL: "https://manage.windowsazure.us/",
|
||||
PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index",
|
||||
ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/",
|
||||
ResourceManagerEndpoint: "https://management.usgovcloudapi.net/",
|
||||
ActiveDirectoryEndpoint: "https://login.microsoftonline.com/",
|
||||
GalleryEndpoint: "https://gallery.usgovcloudapi.net/",
|
||||
KeyVaultEndpoint: "https://vault.usgovcloudapi.net/",
|
||||
GraphEndpoint: "https://graph.usgovcloudapi.net/",
|
||||
StorageEndpointSuffix: "core.usgovcloudapi.net",
|
||||
SQLDatabaseDNSSuffix: "database.usgovcloudapi.net",
|
||||
TrafficManagerDNSSuffix: "usgovtrafficmanager.net",
|
||||
KeyVaultDNSSuffix: "vault.usgovcloudapi.net",
|
||||
ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net",
|
||||
ServiceManagementVMDNSSuffix: "usgovcloudapp.net",
|
||||
ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us",
|
||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
||||
}
|
||||
|
||||
// ChinaCloud is the cloud environment operated in China
|
||||
ChinaCloud = Environment{
|
||||
Name: "AzureChinaCloud",
|
||||
ManagementPortalURL: "https://manage.chinacloudapi.com/",
|
||||
PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index",
|
||||
ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/",
|
||||
ResourceManagerEndpoint: "https://management.chinacloudapi.cn/",
|
||||
ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/",
|
||||
GalleryEndpoint: "https://gallery.chinacloudapi.cn/",
|
||||
KeyVaultEndpoint: "https://vault.azure.cn/",
|
||||
GraphEndpoint: "https://graph.chinacloudapi.cn/",
|
||||
StorageEndpointSuffix: "core.chinacloudapi.cn",
|
||||
SQLDatabaseDNSSuffix: "database.chinacloudapi.cn",
|
||||
TrafficManagerDNSSuffix: "trafficmanager.cn",
|
||||
KeyVaultDNSSuffix: "vault.azure.cn",
|
||||
ServiceBusEndpointSuffix: "servicebus.chinacloudapi.net",
|
||||
ServiceManagementVMDNSSuffix: "chinacloudapp.cn",
|
||||
ResourceManagerVMDNSSuffix: "cloudapp.azure.cn",
|
||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
||||
}
|
||||
|
||||
// GermanCloud is the cloud environment operated in Germany
|
||||
GermanCloud = Environment{
|
||||
Name: "AzureGermanCloud",
|
||||
ManagementPortalURL: "http://portal.microsoftazure.de/",
|
||||
PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index",
|
||||
ServiceManagementEndpoint: "https://management.core.cloudapi.de/",
|
||||
ResourceManagerEndpoint: "https://management.microsoftazure.de/",
|
||||
ActiveDirectoryEndpoint: "https://login.microsoftonline.de/",
|
||||
GalleryEndpoint: "https://gallery.cloudapi.de/",
|
||||
KeyVaultEndpoint: "https://vault.microsoftazure.de/",
|
||||
GraphEndpoint: "https://graph.cloudapi.de/",
|
||||
StorageEndpointSuffix: "core.cloudapi.de",
|
||||
SQLDatabaseDNSSuffix: "database.cloudapi.de",
|
||||
TrafficManagerDNSSuffix: "azuretrafficmanager.de",
|
||||
KeyVaultDNSSuffix: "vault.microsoftazure.de",
|
||||
ServiceBusEndpointSuffix: "servicebus.cloudapi.de",
|
||||
ServiceManagementVMDNSSuffix: "azurecloudapp.de",
|
||||
ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de",
|
||||
ContainerRegistryDNSSuffix: "azurecr.io",
|
||||
}
|
||||
)
|
||||
|
||||
// EnvironmentFromName returns an Environment based on the common name specified
|
||||
func EnvironmentFromName(name string) (Environment, error) {
|
||||
name = strings.ToUpper(name)
|
||||
env, ok := environments[name]
|
||||
if !ok {
|
||||
return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name)
|
||||
}
|
||||
return env, nil
|
||||
}
|
||||
|
||||
// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls
|
||||
func (env Environment) OAuthConfigForTenant(tenantID string) (*OAuthConfig, error) {
|
||||
return OAuthConfigForTenant(env.ActiveDirectoryEndpoint, tenantID)
|
||||
}
|
||||
|
||||
// OAuthConfigForTenant returns an OAuthConfig with tenant specific urls for target cloud auth endpoint
|
||||
func OAuthConfigForTenant(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
|
||||
template := "%s/oauth2/%s?api-version=%s"
|
||||
u, err := url.Parse(activeDirectoryEndpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
authorizeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "authorize", activeDirectoryAPIVersion))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tokenURL, err := u.Parse(fmt.Sprintf(template, tenantID, "token", activeDirectoryAPIVersion))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
deviceCodeURL, err := u.Parse(fmt.Sprintf(template, tenantID, "devicecode", activeDirectoryAPIVersion))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &OAuthConfig{
|
||||
AuthorizeEndpoint: *authorizeURL,
|
||||
TokenEndpoint: *tokenURL,
|
||||
DeviceCodeEndpoint: *deviceCodeURL,
|
||||
}, nil
|
||||
}
|
|
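EnvironmentFromName and OAuthConfigForTenant are typically combined to resolve the endpoints for a given cloud and tenant. A short sketch, with a placeholder tenant ID:

package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// The lookup is case-insensitive ("AzurePublicCloud" -> "AZUREPUBLICCLOUD").
	env, err := azure.EnvironmentFromName("AzurePublicCloud")
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical tenant ID, for illustration only.
	cfg, err := env.OAuthConfigForTenant("00000000-0000-0000-0000-000000000000")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("resource manager:", env.ResourceManagerEndpoint)
	fmt.Println("token endpoint:  ", cfg.TokenEndpoint.String())
	fmt.Println("device endpoint: ", cfg.DeviceCodeEndpoint.String())
}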
@ -0,0 +1,59 @@
|
|||
package azure
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// LoadToken restores a Token object from a file located at 'path'.
|
||||
func LoadToken(path string) (*Token, error) {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
var token Token
|
||||
|
||||
dec := json.NewDecoder(file)
|
||||
if err = dec.Decode(&token); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err)
|
||||
}
|
||||
return &token, nil
|
||||
}
|
||||
|
||||
// SaveToken persists an oauth token at the given location on disk.
|
||||
// It moves the new file into place so it can safely be used to replace an existing file
|
||||
// that may be accessed by multiple processes.
|
||||
func SaveToken(path string, mode os.FileMode, token Token) error {
|
||||
dir := filepath.Dir(path)
|
||||
err := os.MkdirAll(dir, os.ModePerm)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err)
|
||||
}
|
||||
|
||||
newFile, err := ioutil.TempFile(dir, "token")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create the temp file to write the token: %v", err)
|
||||
}
|
||||
tempPath := newFile.Name()
|
||||
|
||||
if err := json.NewEncoder(newFile).Encode(token); err != nil {
|
||||
return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err)
|
||||
}
|
||||
if err := newFile.Close(); err != nil {
|
||||
return fmt.Errorf("failed to close temp file %s: %v", tempPath, err)
|
||||
}
|
||||
|
||||
// Atomic replace to avoid multi-writer file corruptions
|
||||
if err := os.Rename(tempPath, path); err != nil {
|
||||
return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err)
|
||||
}
|
||||
if err := os.Chmod(path, mode); err != nil {
|
||||
return fmt.Errorf("failed to chmod the token file %s: %v", path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
|
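LoadToken and SaveToken give a simple file-based token cache; SaveToken writes to a temporary file and renames it into place, so concurrent readers never observe a partial write. An illustrative round trip, using a hypothetical cache path:

package main

import (
	"fmt"
	"log"
	"path/filepath"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Hypothetical cache location; any writable path works.
	path := filepath.Join("/tmp", "azure-token-cache", "token.json")

	tok := azure.Token{AccessToken: "example-access-token", Type: "Bearer"}

	// Atomically persist the token with owner-only permissions.
	if err := azure.SaveToken(path, 0600, tok); err != nil {
		log.Fatal(err)
	}

	restored, err := azure.LoadToken(path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("restored token type:", restored.Type)
}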
@ -0,0 +1,363 @@
|
|||
package azure
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha1"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/dgrijalva/jwt-go"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultRefresh = 5 * time.Minute
|
||||
tokenBaseDate = "1970-01-01T00:00:00Z"
|
||||
|
||||
// OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
|
||||
OAuthGrantTypeDeviceCode = "device_code"
|
||||
|
||||
// OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
|
||||
OAuthGrantTypeClientCredentials = "client_credentials"
|
||||
|
||||
// OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
|
||||
OAuthGrantTypeRefreshToken = "refresh_token"
|
||||
)
|
||||
|
||||
var expirationBase time.Time
|
||||
|
||||
func init() {
|
||||
expirationBase, _ = time.Parse(time.RFC3339, tokenBaseDate)
|
||||
}
|
||||
|
||||
// TokenRefreshCallback is the type representing callbacks that will be called after
|
||||
// a successful token refresh
|
||||
type TokenRefreshCallback func(Token) error
|
||||
|
||||
// Token encapsulates the access token used to authorize Azure requests.
|
||||
type Token struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
|
||||
ExpiresIn string `json:"expires_in"`
|
||||
ExpiresOn string `json:"expires_on"`
|
||||
NotBefore string `json:"not_before"`
|
||||
|
||||
Resource string `json:"resource"`
|
||||
Type string `json:"token_type"`
|
||||
}
|
||||
|
||||
// Expires returns the time.Time when the Token expires.
|
||||
func (t Token) Expires() time.Time {
|
||||
s, err := strconv.Atoi(t.ExpiresOn)
|
||||
if err != nil {
|
||||
s = -3600
|
||||
}
|
||||
return expirationBase.Add(time.Duration(s) * time.Second).UTC()
|
||||
}
|
||||
|
||||
// IsExpired returns true if the Token is expired, false otherwise.
|
||||
func (t Token) IsExpired() bool {
|
||||
return t.WillExpireIn(0)
|
||||
}
|
||||
|
||||
// WillExpireIn returns true if the Token will expire after the passed time.Duration interval
|
||||
// from now, false otherwise.
|
||||
func (t Token) WillExpireIn(d time.Duration) bool {
|
||||
return !t.Expires().After(time.Now().Add(d))
|
||||
}
|
||||
|
||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
|
||||
// value is "Bearer " followed by the AccessToken of the Token.
|
||||
func (t *Token) WithAuthorization() autorest.PrepareDecorator {
|
||||
return func(p autorest.Preparer) autorest.Preparer {
|
||||
return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
return (autorest.WithBearerAuthorization(t.AccessToken)(p)).Prepare(r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ServicePrincipalNoSecret represents a secret type that contains no secret
|
||||
// meaning it is not valid for fetching a fresh token. It is used by tokens created manually (see NewServicePrincipalTokenFromManualToken).
|
||||
type ServicePrincipalNoSecret struct {
|
||||
}
|
||||
|
||||
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret
|
||||
// It only returns an error for the ServicePrincipalNoSecret type
|
||||
func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
|
||||
return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
|
||||
}
|
||||
|
||||
// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form
|
||||
// that is submitted when acquiring an oAuth token.
|
||||
type ServicePrincipalSecret interface {
|
||||
SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
|
||||
}
|
||||
|
||||
// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization.
|
||||
type ServicePrincipalTokenSecret struct {
|
||||
ClientSecret string
|
||||
}
|
||||
|
||||
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
|
||||
// It will populate the form submitted during oAuth Token Acquisition using the client_secret.
|
||||
func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
|
||||
v.Set("client_secret", tokenSecret.ClientSecret)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs.
|
||||
type ServicePrincipalCertificateSecret struct {
|
||||
Certificate *x509.Certificate
|
||||
PrivateKey *rsa.PrivateKey
|
||||
}
|
||||
|
||||
// SignJwt returns the JWT signed with the certificate's private key.
|
||||
func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) {
|
||||
hasher := sha1.New()
|
||||
_, err := hasher.Write(secret.Certificate.Raw)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil))
|
||||
|
||||
// The jti (JWT ID) claim provides a unique identifier for the JWT.
|
||||
jti := make([]byte, 20)
|
||||
_, err = rand.Read(jti)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
token := jwt.New(jwt.SigningMethodRS256)
|
||||
token.Header["x5t"] = thumbprint
|
||||
token.Claims = jwt.MapClaims{
|
||||
"aud": spt.oauthConfig.TokenEndpoint.String(),
|
||||
"iss": spt.clientID,
|
||||
"sub": spt.clientID,
|
||||
"jti": base64.URLEncoding.EncodeToString(jti),
|
||||
"nbf": time.Now().Unix(),
|
||||
"exp": time.Now().Add(time.Hour * 24).Unix(),
|
||||
}
|
||||
|
||||
signedString, err := token.SignedString(secret.PrivateKey)
|
||||
return signedString, err
|
||||
}
|
||||
|
||||
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
|
||||
// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate.
|
||||
func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
|
||||
jwt, err := secret.SignJwt(spt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
v.Set("client_assertion", jwt)
|
||||
v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer")
|
||||
return nil
|
||||
}
|
||||
|
||||
// ServicePrincipalToken encapsulates a Token created for a Service Principal.
|
||||
type ServicePrincipalToken struct {
|
||||
Token
|
||||
|
||||
secret ServicePrincipalSecret
|
||||
oauthConfig OAuthConfig
|
||||
clientID string
|
||||
resource string
|
||||
autoRefresh bool
|
||||
refreshWithin time.Duration
|
||||
sender autorest.Sender
|
||||
|
||||
refreshCallbacks []TokenRefreshCallback
|
||||
}
|
||||
|
||||
// NewServicePrincipalTokenWithSecret creates a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
|
||||
func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
|
||||
spt := &ServicePrincipalToken{
|
||||
oauthConfig: oauthConfig,
|
||||
secret: secret,
|
||||
clientID: id,
|
||||
resource: resource,
|
||||
autoRefresh: true,
|
||||
refreshWithin: defaultRefresh,
|
||||
sender: &http.Client{},
|
||||
refreshCallbacks: callbacks,
|
||||
}
|
||||
return spt, nil
|
||||
}
|
||||
|
||||
// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token
|
||||
func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
|
||||
spt, err := NewServicePrincipalTokenWithSecret(
|
||||
oauthConfig,
|
||||
clientID,
|
||||
resource,
|
||||
&ServicePrincipalNoSecret{},
|
||||
callbacks...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
spt.Token = token
|
||||
|
||||
return spt, nil
|
||||
}
|
||||
|
||||
// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal
|
||||
// credentials scoped to the named resource.
|
||||
func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
|
||||
return NewServicePrincipalTokenWithSecret(
|
||||
oauthConfig,
|
||||
clientID,
|
||||
resource,
|
||||
&ServicePrincipalTokenSecret{
|
||||
ClientSecret: secret,
|
||||
},
|
||||
callbacks...,
|
||||
)
|
||||
}
|
||||
|
||||
// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied certificate and RSA private key.
|
||||
func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
|
||||
return NewServicePrincipalTokenWithSecret(
|
||||
oauthConfig,
|
||||
clientID,
|
||||
resource,
|
||||
&ServicePrincipalCertificateSecret{
|
||||
PrivateKey: privateKey,
|
||||
Certificate: certificate,
|
||||
},
|
||||
callbacks...,
|
||||
)
|
||||
}
|
||||
|
||||
// EnsureFresh will refresh the token if it will expire within the refresh window (as set by
|
||||
// RefreshWithin).
|
||||
func (spt *ServicePrincipalToken) EnsureFresh() error {
|
||||
if spt.WillExpireIn(spt.refreshWithin) {
|
||||
return spt.Refresh()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization
|
||||
func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
|
||||
if spt.refreshCallbacks != nil {
|
||||
for _, callback := range spt.refreshCallbacks {
|
||||
err := callback(spt.Token)
|
||||
if err != nil {
|
||||
return autorest.NewErrorWithError(err,
|
||||
"azure.ServicePrincipalToken", "InvokeRefreshCallbacks", nil, "A TokenRefreshCallback handler returned an error")
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Refresh obtains a fresh token for the Service Principal.
|
||||
func (spt *ServicePrincipalToken) Refresh() error {
|
||||
return spt.refreshInternal(spt.resource)
|
||||
}
|
||||
|
||||
// RefreshExchange refreshes the token, but for a different resource.
|
||||
func (spt *ServicePrincipalToken) RefreshExchange(resource string) error {
|
||||
return spt.refreshInternal(resource)
|
||||
}
|
||||
|
||||
func (spt *ServicePrincipalToken) refreshInternal(resource string) error {
|
||||
v := url.Values{}
|
||||
v.Set("client_id", spt.clientID)
|
||||
v.Set("resource", resource)
|
||||
|
||||
if spt.RefreshToken != "" {
|
||||
v.Set("grant_type", OAuthGrantTypeRefreshToken)
|
||||
v.Set("refresh_token", spt.RefreshToken)
|
||||
} else {
|
||||
v.Set("grant_type", OAuthGrantTypeClientCredentials)
|
||||
err := spt.secret.SetAuthenticationValues(spt, &v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
req, _ := autorest.Prepare(&http.Request{},
|
||||
autorest.AsPost(),
|
||||
autorest.AsFormURLEncoded(),
|
||||
autorest.WithBaseURL(spt.oauthConfig.TokenEndpoint.String()),
|
||||
autorest.WithFormData(v))
|
||||
|
||||
resp, err := autorest.SendWithSender(spt.sender, req)
|
||||
if err != nil {
|
||||
return autorest.NewErrorWithError(err,
|
||||
"azure.ServicePrincipalToken", "Refresh", resp, "Failure sending request for Service Principal %s",
|
||||
spt.clientID)
|
||||
}
|
||||
|
||||
var newToken Token
|
||||
err = autorest.Respond(resp,
|
||||
autorest.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&newToken),
|
||||
autorest.ByClosing())
|
||||
if err != nil {
|
||||
return autorest.NewErrorWithError(err,
|
||||
"azure.ServicePrincipalToken", "Refresh", resp, "Failure handling response to Service Principal %s request",
|
||||
spt.clientID)
|
||||
}
|
||||
|
||||
spt.Token = newToken
|
||||
|
||||
err = spt.InvokeRefreshCallbacks(newToken)
|
||||
if err != nil {
|
||||
// its already wrapped inside InvokeRefreshCallbacks
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetAutoRefresh enables or disables automatic refreshing of stale tokens.
|
||||
func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) {
|
||||
spt.autoRefresh = autoRefresh
|
||||
}
|
||||
|
||||
// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will
|
||||
// refresh the token.
|
||||
func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) {
|
||||
spt.refreshWithin = d
|
||||
return
|
||||
}
|
||||
|
||||
// SetSender sets the autorest.Sender used when obtaining the Service Principal token. An
|
||||
// undecorated http.Client is used by default.
|
||||
func (spt *ServicePrincipalToken) SetSender(s autorest.Sender) {
|
||||
spt.sender = s
|
||||
}
|
||||
|
||||
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
|
||||
// value is "Bearer " followed by the AccessToken of the ServicePrincipalToken.
|
||||
//
|
||||
// By default, the token will automatically refresh if nearly expired (as determined by the
|
||||
// RefreshWithin interval). Use the SetAutoRefresh method to enable or disable automatic refreshing of
|
||||
// tokens.
|
||||
func (spt *ServicePrincipalToken) WithAuthorization() autorest.PrepareDecorator {
|
||||
return func(p autorest.Preparer) autorest.Preparer {
|
||||
return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
if spt.autoRefresh {
|
||||
err := spt.EnsureFresh()
|
||||
if err != nil {
|
||||
return r, autorest.NewErrorWithError(err,
|
||||
"azure.ServicePrincipalToken", "WithAuthorization", nil, "Failed to refresh Service Principal Token for request to %s",
|
||||
r.URL)
|
||||
}
|
||||
}
|
||||
return (autorest.WithBearerAuthorization(spt.AccessToken)(p)).Prepare(r)
|
||||
})
|
||||
}
|
||||
}
|
|
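A ServicePrincipalToken ties the pieces above together: it refreshes itself via the client_credentials grant and decorates outgoing requests with a bearer token. A sketch with placeholder tenant/client IDs, secret and request URL:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Hypothetical identifiers and secret, for illustration only.
	oauthConfig, err := azure.PublicCloud.OAuthConfigForTenant("00000000-0000-0000-0000-000000000000")
	if err != nil {
		log.Fatal(err)
	}

	spt, err := azure.NewServicePrincipalToken(
		*oauthConfig,
		"11111111-1111-1111-1111-111111111111", // client (application) ID
		"client-secret-value",
		"https://management.azure.com/")
	if err != nil {
		log.Fatal(err)
	}

	// Refresh obtains an access token using the client_credentials grant.
	if err := spt.Refresh(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("token expires:", spt.Expires())

	// WithAuthorization adds "Authorization: Bearer <token>" and, by default,
	// refreshes the token when it is close to expiry.
	req, _ := http.NewRequest("GET", "https://management.azure.com/subscriptions?api-version=2016-06-01", nil)
	req, err = autorest.Prepare(req, spt.WithAuthorization())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("authorization header set:", req.Header.Get("Authorization") != "")
}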
@ -0,0 +1,235 @@
|
|||
package autorest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/cookiejar"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultPollingDelay is a reasonable delay between polling requests.
|
||||
DefaultPollingDelay = 60 * time.Second
|
||||
|
||||
// DefaultPollingDuration is a reasonable total polling duration.
|
||||
DefaultPollingDuration = 15 * time.Minute
|
||||
|
||||
// DefaultRetryAttempts is number of attempts for retry status codes (5xx).
|
||||
DefaultRetryAttempts = 3
|
||||
)
|
||||
|
||||
var (
|
||||
// defaultUserAgent builds a string containing the Go version, system architecture and OS,
|
||||
// and the go-autorest version.
|
||||
defaultUserAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
|
||||
runtime.Version(),
|
||||
runtime.GOARCH,
|
||||
runtime.GOOS,
|
||||
Version(),
|
||||
)
|
||||
|
||||
statusCodesForRetry = []int{
|
||||
http.StatusRequestTimeout, // 408
|
||||
http.StatusInternalServerError, // 500
|
||||
http.StatusBadGateway, // 502
|
||||
http.StatusServiceUnavailable, // 503
|
||||
http.StatusGatewayTimeout, // 504
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
requestFormat = `HTTP Request Begin ===================================================
|
||||
%s
|
||||
===================================================== HTTP Request End
|
||||
`
|
||||
responseFormat = `HTTP Response Begin ===================================================
|
||||
%s
|
||||
===================================================== HTTP Response End
|
||||
`
|
||||
)
|
||||
|
||||
// Response serves as the base for all responses from generated clients. It provides access to the
|
||||
// last http.Response.
|
||||
type Response struct {
|
||||
*http.Response `json:"-"`
|
||||
}
|
||||
|
||||
// LoggingInspector implements request and response inspectors that log the full request and
|
||||
// response to a supplied log.
|
||||
type LoggingInspector struct {
|
||||
Logger *log.Logger
|
||||
}
|
||||
|
||||
// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The
|
||||
// body is restored after being emitted.
|
||||
//
|
||||
// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
|
||||
// important. It is best used to trace JSON or similar body values.
|
||||
func (li LoggingInspector) WithInspection() PrepareDecorator {
|
||||
return func(p Preparer) Preparer {
|
||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
var body, b bytes.Buffer
|
||||
|
||||
defer r.Body.Close()
|
||||
|
||||
r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body))
|
||||
if err := r.Write(&b); err != nil {
|
||||
return nil, fmt.Errorf("Failed to write response: %v", err)
|
||||
}
|
||||
|
||||
li.Logger.Printf(requestFormat, b.String())
|
||||
|
||||
r.Body = ioutil.NopCloser(&body)
|
||||
return p.Prepare(r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The
|
||||
// body is restored after being emitted.
|
||||
//
|
||||
// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
|
||||
// important. It is best used to trace JSON or similar body values.
|
||||
func (li LoggingInspector) ByInspecting() RespondDecorator {
|
||||
return func(r Responder) Responder {
|
||||
return ResponderFunc(func(resp *http.Response) error {
|
||||
var body, b bytes.Buffer
|
||||
defer resp.Body.Close()
|
||||
resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body))
|
||||
if err := resp.Write(&b); err != nil {
|
||||
return fmt.Errorf("Failed to write response: %v", err)
|
||||
}
|
||||
|
||||
li.Logger.Printf(responseFormat, b.String())
|
||||
|
||||
resp.Body = ioutil.NopCloser(&body)
|
||||
return r.Respond(resp)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Client is the base for autorest generated clients. It provides default, "do nothing"
|
||||
// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the
|
||||
// standard, undecorated http.Client as a default Sender.
|
||||
//
|
||||
// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and
|
||||
// return responses that compose with Response.
|
||||
//
|
||||
// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom
|
||||
// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit
|
||||
// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence
|
||||
// sending the request by providing a decorated Sender.
|
||||
type Client struct {
|
||||
Authorizer Authorizer
|
||||
Sender Sender
|
||||
RequestInspector PrepareDecorator
|
||||
ResponseInspector RespondDecorator
|
||||
|
||||
// PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header
|
||||
PollingDelay time.Duration
|
||||
|
||||
// PollingDuration sets the maximum polling time after which an error is returned.
|
||||
PollingDuration time.Duration
|
||||
|
||||
// RetryAttempts sets the default number of retry attempts for client.
|
||||
RetryAttempts int
|
||||
|
||||
// RetryDuration sets the delay duration for retries.
|
||||
RetryDuration time.Duration
|
||||
|
||||
// UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent
|
||||
// through the Do method.
|
||||
UserAgent string
|
||||
|
||||
Jar http.CookieJar
|
||||
}
|
||||
|
||||
// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed
|
||||
// string.
|
||||
func NewClientWithUserAgent(ua string) Client {
|
||||
c := Client{
|
||||
PollingDelay: DefaultPollingDelay,
|
||||
PollingDuration: DefaultPollingDuration,
|
||||
RetryAttempts: DefaultRetryAttempts,
|
||||
RetryDuration: 30 * time.Second,
|
||||
UserAgent: defaultUserAgent,
|
||||
}
|
||||
c.AddToUserAgent(ua)
|
||||
return c
|
||||
}
|
||||
|
||||
// AddToUserAgent adds an extension to the current user agent
|
||||
func (c *Client) AddToUserAgent(extension string) error {
|
||||
if extension != "" {
|
||||
c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension)
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent)
|
||||
}
|
||||
|
||||
// Do implements the Sender interface by invoking the active Sender after applying authorization.
|
||||
// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent
|
||||
// is set, set the User-Agent header.
|
||||
func (c Client) Do(r *http.Request) (*http.Response, error) {
|
||||
if r.UserAgent() == "" {
|
||||
r, _ = Prepare(r,
|
||||
WithUserAgent(c.UserAgent))
|
||||
}
|
||||
r, err := Prepare(r,
|
||||
c.WithInspection(),
|
||||
c.WithAuthorization())
|
||||
if err != nil {
|
||||
return nil, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
|
||||
}
|
||||
resp, err := SendWithSender(c.sender(), r,
|
||||
DoRetryForStatusCodes(c.RetryAttempts, c.RetryDuration, statusCodesForRetry...))
|
||||
Respond(resp,
|
||||
c.ByInspecting())
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// sender returns the Sender to which to send requests.
|
||||
func (c Client) sender() Sender {
|
||||
if c.Sender == nil {
|
||||
j, _ := cookiejar.New(nil)
|
||||
return &http.Client{Jar: j}
|
||||
}
|
||||
return c.Sender
|
||||
}
|
||||
|
||||
// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator
|
||||
// from the current Authorizer. If no Authorizer is set, it uses the NullAuthorizer.
|
||||
func (c Client) WithAuthorization() PrepareDecorator {
|
||||
return c.authorizer().WithAuthorization()
|
||||
}
|
||||
|
||||
// authorizer returns the Authorizer to use.
|
||||
func (c Client) authorizer() Authorizer {
|
||||
if c.Authorizer == nil {
|
||||
return NullAuthorizer{}
|
||||
}
|
||||
return c.Authorizer
|
||||
}
|
||||
|
||||
// WithInspection is a convenience method that passes the request to the supplied RequestInspector,
|
||||
// if present, or returns the WithNothing PrepareDecorator otherwise.
|
||||
func (c Client) WithInspection() PrepareDecorator {
|
||||
if c.RequestInspector == nil {
|
||||
return WithNothing()
|
||||
}
|
||||
return c.RequestInspector
|
||||
}
|
||||
|
||||
// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector,
|
||||
// if present, or returns the ByIgnoring RespondDecorator otherwise.
|
||||
func (c Client) ByInspecting() RespondDecorator {
|
||||
if c.ResponseInspector == nil {
|
||||
return ByIgnoring()
|
||||
}
|
||||
return c.ResponseInspector
|
||||
}
|
|
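A typical use of Client is to prepare a request with the decorators from this package and hand it to Do, which applies the Authorizer, the inspectors and the 408/5xx retry policy. A minimal sketch against a hypothetical endpoint:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// NewClientWithUserAgent wires up default polling and retry settings and
	// appends the extension to the default Go/autorest user agent.
	client := autorest.NewClientWithUserAgent("autorest-client-example")

	// Hypothetical endpoint, for illustration only.
	req, err := autorest.Prepare(&http.Request{},
		autorest.AsGet(),
		autorest.WithBaseURL("https://example.com/status"))
	if err != nil {
		log.Fatal(err)
	}

	// Do applies authorization and inspection, then sends the request,
	// retrying on 408/5xx up to RetryAttempts times.
	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}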
@ -0,0 +1,82 @@
|
|||
/*
|
||||
Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/)
|
||||
defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of
|
||||
time.Time types. Both convert to time.Time through a ToTime method.
|
||||
*/
|
||||
package date
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
fullDate = "2006-01-02"
|
||||
fullDateJSON = `"2006-01-02"`
|
||||
dateFormat = "%04d-%02d-%02d"
|
||||
jsonFormat = `"%04d-%02d-%02d"`
|
||||
)
|
||||
|
||||
// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e.,
|
||||
// 2006-01-02).
|
||||
type Date struct {
|
||||
time.Time
|
||||
}
|
||||
|
||||
// ParseDate create a new Date from the passed string.
|
||||
func ParseDate(date string) (d Date, err error) {
|
||||
return parseDate(date, fullDate)
|
||||
}
|
||||
|
||||
func parseDate(date string, format string) (Date, error) {
|
||||
d, err := time.Parse(format, date)
|
||||
return Date{Time: d}, err
|
||||
}
|
||||
|
||||
// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
|
||||
// 2006-01-02).
|
||||
func (d Date) MarshalBinary() ([]byte, error) {
|
||||
return d.MarshalText()
|
||||
}
|
||||
|
||||
// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
|
||||
// 2006-01-02).
|
||||
func (d *Date) UnmarshalBinary(data []byte) error {
|
||||
return d.UnmarshalText(data)
|
||||
}
|
||||
|
||||
// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e.,
|
||||
// 2006-01-02).
|
||||
func (d Date) MarshalJSON() (json []byte, err error) {
|
||||
return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e.,
|
||||
// 2006-01-02).
|
||||
func (d *Date) UnmarshalJSON(data []byte) (err error) {
|
||||
d.Time, err = time.Parse(fullDateJSON, string(data))
|
||||
return err
|
||||
}
|
||||
|
||||
// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
|
||||
// 2006-01-02).
|
||||
func (d Date) MarshalText() (text []byte, err error) {
|
||||
return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil
|
||||
}
|
||||
|
||||
// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
|
||||
// 2006-01-02).
|
||||
func (d *Date) UnmarshalText(data []byte) (err error) {
|
||||
d.Time, err = time.Parse(fullDate, string(data))
|
||||
return err
|
||||
}
|
||||
|
||||
// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02).
|
||||
func (d Date) String() string {
|
||||
return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())
|
||||
}
|
||||
|
||||
// ToTime returns a Date as a time.Time
|
||||
func (d Date) ToTime() time.Time {
|
||||
return d.Time
|
||||
}
|
|
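date.Date round-trips cleanly through JSON as an RFC3339 full-date. A small sketch (the import path is assumed to be the vendored autorest/date package):

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	// Parse an RFC3339 full-date and round-trip it through JSON.
	d, err := date.ParseDate("2017-02-23")
	if err != nil {
		log.Fatal(err)
	}

	b, _ := json.Marshal(d)
	fmt.Println(string(b)) // "2017-02-23"

	var back date.Date
	if err := json.Unmarshal(b, &back); err != nil {
		log.Fatal(err)
	}
	fmt.Println(back.ToTime().Year()) // 2017
}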
@ -0,0 +1,89 @@
|
|||
package date
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases.
|
||||
const (
|
||||
azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"`
|
||||
azureUtcFormat = "2006-01-02T15:04:05.999999999"
|
||||
rfc3339JSON = `"` + time.RFC3339Nano + `"`
|
||||
rfc3339 = time.RFC3339Nano
|
||||
tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$`
|
||||
)
|
||||
|
||||
// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e.,
|
||||
// 2006-01-02T15:04:05Z).
|
||||
type Time struct {
|
||||
time.Time
|
||||
}
|
||||
|
||||
// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e.,
|
||||
// 2006-01-02T15:04:05Z).
|
||||
func (t Time) MarshalBinary() ([]byte, error) {
|
||||
return t.Time.MarshalText()
|
||||
}
|
||||
|
||||
// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time
|
||||
// (i.e., 2006-01-02T15:04:05Z).
|
||||
func (t *Time) UnmarshalBinary(data []byte) error {
|
||||
return t.UnmarshalText(data)
|
||||
}
|
||||
|
||||
// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e.,
|
||||
// 2006-01-02T15:04:05Z).
|
||||
func (t Time) MarshalJSON() (json []byte, err error) {
|
||||
return t.Time.MarshalJSON()
|
||||
}
|
||||
|
||||
// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time
|
||||
// (i.e., 2006-01-02T15:04:05Z).
|
||||
func (t *Time) UnmarshalJSON(data []byte) (err error) {
|
||||
timeFormat := azureUtcFormatJSON
|
||||
match, err := regexp.Match(tzOffsetRegex, data)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if match {
|
||||
timeFormat = rfc3339JSON
|
||||
}
|
||||
t.Time, err = ParseTime(timeFormat, string(data))
|
||||
return err
|
||||
}
|
||||
|
||||
// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e.,
|
||||
// 2006-01-02T15:04:05Z).
|
||||
func (t Time) MarshalText() (text []byte, err error) {
|
||||
return t.Time.MarshalText()
|
||||
}
|
||||
|
||||
// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time
|
||||
// (i.e., 2006-01-02T15:04:05Z).
|
||||
func (t *Time) UnmarshalText(data []byte) (err error) {
|
||||
timeFormat := azureUtcFormat
|
||||
match, err := regexp.Match(tzOffsetRegex, data)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if match {
|
||||
timeFormat = rfc3339
|
||||
}
|
||||
t.Time, err = ParseTime(timeFormat, string(data))
|
||||
return err
|
||||
}
|
||||
|
||||
// String returns the Time formatted as an RFC3339 date-time string (i.e.,
|
||||
// 2006-01-02T15:04:05Z).
|
||||
func (t Time) String() string {
|
||||
// Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does.
|
||||
b, err := t.MarshalText()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// ToTime returns a Time as a time.Time
|
||||
func (t Time) ToTime() time.Time {
|
||||
return t.Time
|
||||
}
|
|
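date.Time accepts timestamps both with and without the trailing 'Z' that Azure sometimes omits, as the regex switch above shows. For illustration:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	// Azure sometimes omits the trailing 'Z'; date.Time parses both forms as UTC.
	var withZone, withoutZone date.Time
	if err := json.Unmarshal([]byte(`"2017-02-23T12:30:00Z"`), &withZone); err != nil {
		log.Fatal(err)
	}
	if err := json.Unmarshal([]byte(`"2017-02-23T12:30:00"`), &withoutZone); err != nil {
		log.Fatal(err)
	}
	fmt.Println(withZone.ToTime().Equal(withoutZone.ToTime())) // true
}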
@ -0,0 +1,86 @@
|
|||
package date
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
rfc1123JSON = `"` + time.RFC1123 + `"`
|
||||
rfc1123 = time.RFC1123
|
||||
)
|
||||
|
||||
// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e.,
|
||||
// Mon, 02 Jan 2006 15:04:05 MST).
|
||||
type TimeRFC1123 struct {
|
||||
time.Time
|
||||
}
|
||||
|
||||
// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time
|
||||
// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
|
||||
func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) {
|
||||
t.Time, err = ParseTime(rfc1123JSON, string(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e.,
|
||||
// Mon, 02 Jan 2006 15:04:05 MST).
|
||||
func (t TimeRFC1123) MarshalJSON() ([]byte, error) {
|
||||
if y := t.Year(); y < 0 || y >= 10000 {
|
||||
return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]")
|
||||
}
|
||||
b := []byte(t.Format(rfc1123JSON))
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e.,
|
||||
// Mon, 02 Jan 2006 15:04:05 MST).
|
||||
func (t TimeRFC1123) MarshalText() ([]byte, error) {
|
||||
if y := t.Year(); y < 0 || y >= 10000 {
|
||||
return nil, errors.New("Time.MarshalText: year outside of range [0,9999]")
|
||||
}
|
||||
|
||||
b := []byte(t.Format(rfc1123))
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time
|
||||
// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
|
||||
func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) {
|
||||
t.Time, err = ParseTime(rfc1123, string(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e.,
|
||||
// Mon, 02 Jan 2006 15:04:05 MST).
|
||||
func (t TimeRFC1123) MarshalBinary() ([]byte, error) {
|
||||
return t.MarshalText()
|
||||
}
|
||||
|
||||
// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time
|
||||
// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
|
||||
func (t *TimeRFC1123) UnmarshalBinary(data []byte) error {
|
||||
return t.UnmarshalText(data)
|
||||
}
|
||||
|
||||
// ToTime returns a Time as a time.Time
|
||||
func (t TimeRFC1123) ToTime() time.Time {
|
||||
return t.Time
|
||||
}
|
||||
|
||||
// String returns the Time formatted as an RFC1123 date-time string (i.e.,
|
||||
// Mon, 02 Jan 2006 15:04:05 MST).
|
||||
func (t TimeRFC1123) String() string {
|
||||
// Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does.
|
||||
b, err := t.MarshalText()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return string(b)
|
||||
}
|
|
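date.TimeRFC1123 does the same for RFC1123 timestamps of the kind used in HTTP-style headers. For illustration:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	var t date.TimeRFC1123
	if err := json.Unmarshal([]byte(`"Thu, 23 Feb 2017 12:30:00 GMT"`), &t); err != nil {
		log.Fatal(err)
	}

	b, _ := json.Marshal(t)
	fmt.Println(string(b)) // "Thu, 23 Feb 2017 12:30:00 GMT"
}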
@ -0,0 +1,11 @@
|
|||
package date
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ParseTime parses a time string using the specified format.
|
||||
func ParseTime(format string, t string) (d time.Time, err error) {
|
||||
return time.Parse(format, strings.ToUpper(t))
|
||||
}
|
|
@ -0,0 +1,80 @@
|
|||
package autorest
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
const (
|
||||
// UndefinedStatusCode is used when HTTP status code is not available for an error.
|
||||
UndefinedStatusCode = 0
|
||||
)
|
||||
|
||||
// DetailedError encloses a error with details of the package, method, and associated HTTP
|
||||
// status code (if any).
|
||||
type DetailedError struct {
|
||||
Original error
|
||||
|
||||
// PackageType is the package type of the object emitting the error. For types, the value
|
||||
// matches that produced by the '%T' format specifier of the fmt package. For other elements,
|
||||
// such as functions, it is just the package name (e.g., "autorest").
|
||||
PackageType string
|
||||
|
||||
// Method is the name of the method raising the error.
|
||||
Method string
|
||||
|
||||
// StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error.
|
||||
StatusCode interface{}
|
||||
|
||||
// Message is the error message.
|
||||
Message string
|
||||
|
||||
// ServiceError is the response body of the failed API call, in bytes
|
||||
ServiceError []byte
|
||||
}
|
||||
|
||||
// NewError creates a new Error conforming object from the passed packageType, method, and
|
||||
// message. message is treated as a format string to which the optional args apply.
|
||||
func NewError(packageType string, method string, message string, args ...interface{}) DetailedError {
|
||||
return NewErrorWithError(nil, packageType, method, nil, message, args...)
|
||||
}
|
||||
|
||||
// NewErrorWithResponse creates a new Error conforming object from the passed
|
||||
// packageType, method, statusCode of the given resp (UndefinedStatusCode if
|
||||
// resp is nil), and message. message is treated as a format string to which the
|
||||
// optional args apply.
|
||||
func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
|
||||
return NewErrorWithError(nil, packageType, method, resp, message, args...)
|
||||
}
|
||||
|
||||
// NewErrorWithError creates a new Error conforming object from the
|
||||
// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
|
||||
// if resp is nil), message, and original error. message is treated as a format
|
||||
// string to which the optional args apply.
|
||||
func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
|
||||
if v, ok := original.(DetailedError); ok {
|
||||
return v
|
||||
}
|
||||
|
||||
statusCode := UndefinedStatusCode
|
||||
if resp != nil {
|
||||
statusCode = resp.StatusCode
|
||||
}
|
||||
|
||||
return DetailedError{
|
||||
Original: original,
|
||||
PackageType: packageType,
|
||||
Method: method,
|
||||
StatusCode: statusCode,
|
||||
Message: fmt.Sprintf(message, args...),
|
||||
}
|
||||
}
|
||||
|
||||
// Error returns a formatted string containing all available details (i.e., PackageType, Method,
|
||||
// StatusCode, Message, and original error (if any)).
|
||||
func (e DetailedError) Error() string {
|
||||
if e.Original == nil {
|
||||
return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode)
|
||||
}
|
||||
return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original)
|
||||
}
|
|
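NewErrorWithError is the usual way client code attaches package, method and status-code context to a lower-level failure; Error() then renders all of it. A sketch with hypothetical names:

package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Wrap a low-level failure with package/method context and the response status.
	resp := &http.Response{StatusCode: http.StatusServiceUnavailable}
	original := errors.New("connection reset by peer")

	err := autorest.NewErrorWithError(original,
		"example.Client", "Fetch", resp, "fetching build artifact %q failed", "geth-unstable.tar.gz")

	fmt.Println(err.Error())
	// example.Client#Fetch: fetching build artifact "geth-unstable.tar.gz" failed: StatusCode=503 -- Original Error: connection reset by peer
}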
@ -0,0 +1,443 @@
|
|||
package autorest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
mimeTypeJSON = "application/json"
|
||||
mimeTypeFormPost = "application/x-www-form-urlencoded"
|
||||
|
||||
headerAuthorization = "Authorization"
|
||||
headerContentType = "Content-Type"
|
||||
headerUserAgent = "User-Agent"
|
||||
)
|
||||
|
||||
// Preparer is the interface that wraps the Prepare method.
|
||||
//
|
||||
// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations
|
||||
// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used.
|
||||
type Preparer interface {
|
||||
Prepare(*http.Request) (*http.Request, error)
|
||||
}
|
||||
|
||||
// PreparerFunc is a method that implements the Preparer interface.
|
||||
type PreparerFunc func(*http.Request) (*http.Request, error)
|
||||
|
||||
// Prepare implements the Preparer interface on PreparerFunc.
|
||||
func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) {
|
||||
return pf(r)
|
||||
}
|
||||
|
||||
// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the
|
||||
// http.Request and pass it along or, first, pass the http.Request along then affect the result.
|
||||
type PrepareDecorator func(Preparer) Preparer
|
||||
|
||||
// CreatePreparer creates, decorates, and returns a Preparer.
|
||||
// Without decorators, the returned Preparer returns the passed http.Request unmodified.
|
||||
// Preparers are safe to share and re-use.
|
||||
func CreatePreparer(decorators ...PrepareDecorator) Preparer {
|
||||
return DecoratePreparer(
|
||||
Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })),
|
||||
decorators...)
|
||||
}
|
||||
|
||||
// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it
|
||||
// applies to the Preparer. Decorators are applied in the order received, but their effect upon the
|
||||
// request depends on whether they are a pre-decorator (change the http.Request and then pass it
|
||||
// along) or a post-decorator (pass the http.Request along and alter it on return).
|
||||
func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer {
|
||||
for _, decorate := range decorators {
|
||||
p = decorate(p)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators.
|
||||
// It creates a Preparer from the decorators which it then applies to the passed http.Request.
|
||||
func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) {
|
||||
if r == nil {
|
||||
return nil, NewError("autorest", "Prepare", "Invoked without an http.Request")
|
||||
}
|
||||
return CreatePreparer(decorators...).Prepare(r)
|
||||
}
|
||||
|
||||
// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed
|
||||
// http.Request.
|
||||
func WithNothing() PrepareDecorator {
|
||||
return func(p Preparer) Preparer {
|
||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
return p.Prepare(r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to
|
||||
// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before
|
||||
// adding the header.
|
||||
func WithHeader(header string, value string) PrepareDecorator {
|
||||
return func(p Preparer) Preparer {
|
||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
r, err := p.Prepare(r)
|
||||
if err == nil {
|
||||
if r.Header == nil {
|
||||
r.Header = make(http.Header)
|
||||
}
|
||||
r.Header.Set(http.CanonicalHeaderKey(header), value)
|
||||
}
|
||||
return r, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
|
||||
// value is "Bearer " followed by the supplied token.
|
||||
func WithBearerAuthorization(token string) PrepareDecorator {
|
||||
return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token))
|
||||
}
|
||||
|
||||
// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value
|
||||
// is the passed contentType.
|
||||
func AsContentType(contentType string) PrepareDecorator {
|
||||
return WithHeader(headerContentType, contentType)
|
||||
}
|
||||
|
||||
// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the
|
||||
// passed string.
|
||||
func WithUserAgent(ua string) PrepareDecorator {
|
||||
return WithHeader(headerUserAgent, ua)
|
||||
}
|
||||
|
||||
// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is
|
||||
// "application/x-www-form-urlencoded".
|
||||
func AsFormURLEncoded() PrepareDecorator {
|
||||
return AsContentType(mimeTypeFormPost)
|
||||
}
|
||||
|
||||
// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is
|
||||
// "application/json".
|
||||
func AsJSON() PrepareDecorator {
|
||||
return AsContentType(mimeTypeJSON)
|
||||
}
|
||||
|
||||
// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The
|
||||
// decorator does not validate that the passed method string is a known HTTP method.
|
||||
func WithMethod(method string) PrepareDecorator {
|
||||
return func(p Preparer) Preparer {
|
||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
r.Method = method
|
||||
return p.Prepare(r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE.
|
||||
func AsDelete() PrepareDecorator { return WithMethod("DELETE") }
|
||||
|
||||
// AsGet returns a PrepareDecorator that sets the HTTP method to GET.
|
||||
func AsGet() PrepareDecorator { return WithMethod("GET") }
|
||||
|
||||
// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD.
|
||||
func AsHead() PrepareDecorator { return WithMethod("HEAD") }
|
||||
|
||||
// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS.
|
||||
func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") }
|
||||
|
||||
// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH.
|
||||
func AsPatch() PrepareDecorator { return WithMethod("PATCH") }
|
||||
|
||||
// AsPost returns a PrepareDecorator that sets the HTTP method to POST.
|
||||
func AsPost() PrepareDecorator { return WithMethod("POST") }
|
||||
|
||||
// AsPut returns a PrepareDecorator that sets the HTTP method to PUT.
|
||||
func AsPut() PrepareDecorator { return WithMethod("PUT") }
|
||||
|
||||
// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed
|
||||
// from the supplied baseURL.
|
||||
func WithBaseURL(baseURL string) PrepareDecorator {
|
||||
return func(p Preparer) Preparer {
|
||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
r, err := p.Prepare(r)
|
||||
if err == nil {
|
||||
var u *url.URL
|
||||
if u, err = url.Parse(baseURL); err != nil {
|
||||
return r, err
|
||||
}
|
||||
if u.Scheme == "" {
|
||||
err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL)
|
||||
}
|
||||
if err == nil {
|
||||
r.URL = u
|
||||
}
|
||||
}
|
||||
return r, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
|
||||
// supplied base URL with the corresponding values from the passed map, then applies the result as the base URL.
|
||||
func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
|
||||
parameters := ensureValueStrings(urlParameters)
|
||||
for key, value := range parameters {
|
||||
baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1)
|
||||
}
|
||||
return WithBaseURL(baseURL)
|
||||
}
|
||||
|
||||
// WithFormData returns a PrepareDecorator that URL-encodes the passed url.Values (e.g., bar=baz&foo=quux) into the
|
||||
// http.Request body.
|
||||
func WithFormData(v url.Values) PrepareDecorator {
|
||||
return func(p Preparer) Preparer {
|
||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
r, err := p.Prepare(r)
|
||||
if err == nil {
|
||||
s := v.Encode()
|
||||
r.ContentLength = int64(len(s))
|
||||
r.Body = ioutil.NopCloser(strings.NewReader(s))
|
||||
}
|
||||
return r, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// WithMultiPartFormData returns a PrepareDecorator that writes the passed form parameters as multipart/form-data
|
||||
// into the http.Request body.
|
||||
func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator {
|
||||
return func(p Preparer) Preparer {
|
||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
r, err := p.Prepare(r)
|
||||
if err == nil {
|
||||
var body bytes.Buffer
|
||||
writer := multipart.NewWriter(&body)
|
||||
for key, value := range formDataParameters {
|
||||
if rc, ok := value.(io.ReadCloser); ok {
|
||||
var fd io.Writer
|
||||
if fd, err = writer.CreateFormFile(key, key); err != nil {
|
||||
return r, err
|
||||
}
|
||||
if _, err = io.Copy(fd, rc); err != nil {
|
||||
return r, err
|
||||
}
|
||||
} else {
|
||||
if err = writer.WriteField(key, ensureValueString(value)); err != nil {
|
||||
return r, err
|
||||
}
|
||||
}
|
||||
}
|
||||
if err = writer.Close(); err != nil {
|
||||
return r, err
|
||||
}
|
||||
if r.Header == nil {
|
||||
r.Header = make(http.Header)
|
||||
}
|
||||
r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType())
|
||||
r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
|
||||
r.ContentLength = int64(body.Len())
|
||||
return r, err
|
||||
}
|
||||
return r, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// WithFile returns a PrepareDecorator that sends the contents of the passed io.ReadCloser in the request body.
|
||||
func WithFile(f io.ReadCloser) PrepareDecorator {
|
||||
return func(p Preparer) Preparer {
|
||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
r, err := p.Prepare(r)
|
||||
if err == nil {
|
||||
b, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
r.Body = ioutil.NopCloser(bytes.NewReader(b))
|
||||
r.ContentLength = int64(len(b))
|
||||
}
|
||||
return r, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request
|
||||
// and sets the Content-Length header.
|
||||
func WithBool(v bool) PrepareDecorator {
|
||||
return WithString(fmt.Sprintf("%v", v))
|
||||
}
|
||||
|
||||
// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the
|
||||
// request and sets the Content-Length header.
|
||||
func WithFloat32(v float32) PrepareDecorator {
|
||||
return WithString(fmt.Sprintf("%v", v))
|
||||
}
|
||||
|
||||
// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the
|
||||
// request and sets the Content-Length header.
|
||||
func WithFloat64(v float64) PrepareDecorator {
|
||||
return WithString(fmt.Sprintf("%v", v))
|
||||
}
|
||||
|
||||
// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request
|
||||
// and sets the Content-Length header.
|
||||
func WithInt32(v int32) PrepareDecorator {
|
||||
return WithString(fmt.Sprintf("%v", v))
|
||||
}
|
||||
|
||||
// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request
|
||||
// and sets the Content-Length header.
|
||||
func WithInt64(v int64) PrepareDecorator {
|
||||
return WithString(fmt.Sprintf("%v", v))
|
||||
}
|
||||
|
||||
// WithString returns a PrepareDecorator that encodes the passed string into the body of the request
|
||||
// and sets the Content-Length header.
|
||||
func WithString(v string) PrepareDecorator {
|
||||
return func(p Preparer) Preparer {
|
||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
r, err := p.Prepare(r)
|
||||
if err == nil {
|
||||
r.ContentLength = int64(len(v))
|
||||
r.Body = ioutil.NopCloser(strings.NewReader(v))
|
||||
}
|
||||
return r, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the
|
||||
// request and sets the Content-Length header.
|
||||
func WithJSON(v interface{}) PrepareDecorator {
|
||||
return func(p Preparer) Preparer {
|
||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
r, err := p.Prepare(r)
|
||||
if err == nil {
|
||||
b, err := json.Marshal(v)
|
||||
if err == nil {
|
||||
r.ContentLength = int64(len(b))
|
||||
r.Body = ioutil.NopCloser(bytes.NewReader(b))
|
||||
}
|
||||
}
|
||||
return r, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path
|
||||
// is absolute (that is, it begins with a "/"), it replaces the existing path.
|
||||
func WithPath(path string) PrepareDecorator {
|
||||
return func(p Preparer) Preparer {
|
||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
r, err := p.Prepare(r)
|
||||
if err == nil {
|
||||
if r.URL == nil {
|
||||
return r, NewError("autorest", "WithPath", "Invoked with a nil URL")
|
||||
}
|
||||
if r.URL, err = parseURL(r.URL, path); err != nil {
|
||||
return r, err
|
||||
}
|
||||
}
|
||||
return r, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
|
||||
// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The
|
||||
// values will be escaped (aka URL encoded) before insertion into the path.
|
||||
func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
|
||||
parameters := escapeValueStrings(ensureValueStrings(pathParameters))
|
||||
return func(p Preparer) Preparer {
|
||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
r, err := p.Prepare(r)
|
||||
if err == nil {
|
||||
if r.URL == nil {
|
||||
return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL")
|
||||
}
|
||||
for key, value := range parameters {
|
||||
path = strings.Replace(path, "{"+key+"}", value, -1)
|
||||
}
|
||||
if r.URL, err = parseURL(r.URL, path); err != nil {
|
||||
return r, err
|
||||
}
|
||||
}
|
||||
return r, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
|
||||
// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map.
|
||||
func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
|
||||
parameters := ensureValueStrings(pathParameters)
|
||||
return func(p Preparer) Preparer {
|
||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
r, err := p.Prepare(r)
|
||||
if err == nil {
|
||||
if r.URL == nil {
|
||||
return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL")
|
||||
}
|
||||
for key, value := range parameters {
|
||||
path = strings.Replace(path, "{"+key+"}", value, -1)
|
||||
}
|
||||
|
||||
if r.URL, err = parseURL(r.URL, path); err != nil {
|
||||
return r, err
|
||||
}
|
||||
}
|
||||
return r, err
|
||||
})
|
||||
}
|
||||
}
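// Example (an illustrative sketch, not part of the vendored source): substituting
// a brace-enclosed path parameter; the URL and values are placeholders.
//
//	req, err := Prepare(&http.Request{},
//		WithBaseURL("https://example.com"),
//		WithPathParameters("/groups/{groupName}", map[string]interface{}{"groupName": "my-group"}))
//	// On success, req.URL is https://example.com/groups/my-group.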
|
||||
|
||||
func parseURL(u *url.URL, path string) (*url.URL, error) {
|
||||
p := strings.TrimRight(u.String(), "/")
|
||||
if !strings.HasPrefix(path, "/") {
|
||||
path = "/" + path
|
||||
}
|
||||
return url.Parse(p + path)
|
||||
}
|
||||
|
||||
// WithQueryParameters returns a PrepareDecorator that encodes and applies the query parameters
|
||||
// given in the supplied map (i.e., key=value).
|
||||
func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator {
|
||||
parameters := ensureValueStrings(queryParameters)
|
||||
return func(p Preparer) Preparer {
|
||||
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
|
||||
r, err := p.Prepare(r)
|
||||
if err == nil {
|
||||
if r.URL == nil {
|
||||
return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
|
||||
}
|
||||
v := r.URL.Query()
|
||||
for key, value := range parameters {
|
||||
v.Add(key, value)
|
||||
}
|
||||
r.URL.RawQuery = createQuery(v)
|
||||
}
|
||||
return r, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Authorizer is the interface that provides a PrepareDecorator used to supply request
|
||||
// authorization. Most often, the Authorizer decorator runs last so it has access to the full
|
||||
// state of the formed HTTP request.
|
||||
type Authorizer interface {
|
||||
WithAuthorization() PrepareDecorator
|
||||
}
|
||||
|
||||
// NullAuthorizer implements a default, "do nothing" Authorizer.
|
||||
type NullAuthorizer struct{}
|
||||
|
||||
// WithAuthorization returns a PrepareDecorator that does nothing.
|
||||
func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
|
||||
return WithNothing()
|
||||
}
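// Example (an illustrative sketch, not part of the vendored source): a minimal
// Authorizer built on WithBearerAuthorization; tokenAuthorizer is a hypothetical type.
//
//	type tokenAuthorizer struct{ token string }
//
//	func (ta tokenAuthorizer) WithAuthorization() PrepareDecorator {
//		return WithBearerAuthorization(ta.token)
//	}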
|
|
@@ -0,0 +1,236 @@
|
|||
package autorest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Responder is the interface that wraps the Respond method.
|
||||
//
|
||||
// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold
|
||||
// state since Responders may be shared and re-used.
|
||||
type Responder interface {
|
||||
Respond(*http.Response) error
|
||||
}
|
||||
|
||||
// ResponderFunc is a method that implements the Responder interface.
|
||||
type ResponderFunc func(*http.Response) error
|
||||
|
||||
// Respond implements the Responder interface on ResponderFunc.
|
||||
func (rf ResponderFunc) Respond(r *http.Response) error {
|
||||
return rf(r)
|
||||
}
|
||||
|
||||
// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to
|
||||
// the http.Response and pass it along or, first, pass the http.Response along then react.
|
||||
type RespondDecorator func(Responder) Responder
|
||||
|
||||
// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned
|
||||
// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share
|
||||
// and re-use: it depends on the applied decorators. For example, a standard decorator that closes
|
||||
// the response body is fine to share whereas a decorator that reads the body into a passed struct
|
||||
// is not.
|
||||
//
|
||||
// To prevent memory leaks, ensure that at least one Responder closes the response body.
|
||||
func CreateResponder(decorators ...RespondDecorator) Responder {
|
||||
return DecorateResponder(
|
||||
Responder(ResponderFunc(func(r *http.Response) error { return nil })),
|
||||
decorators...)
|
||||
}
|
||||
|
||||
// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it
|
||||
// applies to the Responder. Decorators are applied in the order received, but their effect upon the
|
||||
// response depends on whether they are a pre-decorator (react to the http.Response and then pass it
|
||||
// along) or a post-decorator (pass the http.Response along and then react).
|
||||
func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder {
|
||||
for _, decorate := range decorators {
|
||||
r = decorate(r)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators.
|
||||
// It creates a Responder from the decorators which it then applies to the passed http.Response.
|
||||
func Respond(r *http.Response, decorators ...RespondDecorator) error {
|
||||
if r == nil {
|
||||
return nil
|
||||
}
|
||||
return CreateResponder(decorators...).Respond(r)
|
||||
}
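// Example (an illustrative sketch, not part of the vendored source): checking the
// status code, decoding the JSON body, and closing it. "resp" is a hypothetical
// *http.Response obtained from a Sender.
//
//	var result map[string]interface{}
//	err := Respond(resp,
//		WithErrorUnlessStatusCode(http.StatusOK),
//		ByUnmarshallingJSON(&result),
//		ByClosing())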
|
||||
|
||||
// ByIgnoring returns a RespondDecorator that ignores the passed http.Response, passing it unexamined
|
||||
// to the next RespondDecorator.
|
||||
func ByIgnoring() RespondDecorator {
|
||||
return func(r Responder) Responder {
|
||||
return ResponderFunc(func(resp *http.Response) error {
|
||||
return r.Respond(resp)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ByCopying returns a RespondDecorator that copies the contents of the http.Response Body into the passed bytes.Buffer as
|
||||
// the Body is read.
|
||||
func ByCopying(b *bytes.Buffer) RespondDecorator {
|
||||
return func(r Responder) Responder {
|
||||
return ResponderFunc(func(resp *http.Response) error {
|
||||
err := r.Respond(resp)
|
||||
if err == nil && resp != nil && resp.Body != nil {
|
||||
resp.Body = TeeReadCloser(resp.Body, b)
|
||||
}
|
||||
return err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which
|
||||
// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed
|
||||
// Responder is invoked prior to discarding the response body, the decorator may occur anywhere
|
||||
// within the set.
|
||||
func ByDiscardingBody() RespondDecorator {
|
||||
return func(r Responder) Responder {
|
||||
return ResponderFunc(func(resp *http.Response) error {
|
||||
err := r.Respond(resp)
|
||||
if err == nil && resp != nil && resp.Body != nil {
|
||||
if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
|
||||
return fmt.Errorf("Error discarding the response body: %v", err)
|
||||
}
|
||||
}
|
||||
return err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it
|
||||
// closes the response body. Since the passed Responder is invoked prior to closing the response
|
||||
// body, the decorator may occur anywhere within the set.
|
||||
func ByClosing() RespondDecorator {
|
||||
return func(r Responder) Responder {
|
||||
return ResponderFunc(func(resp *http.Response) error {
|
||||
err := r.Respond(resp)
|
||||
if resp != nil && resp.Body != nil {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
return fmt.Errorf("Error closing the response body: %v", err)
|
||||
}
|
||||
}
|
||||
return err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which
|
||||
// it closes the response if the passed Responder returns an error and the response body exists.
|
||||
func ByClosingIfError() RespondDecorator {
|
||||
return func(r Responder) Responder {
|
||||
return ResponderFunc(func(resp *http.Response) error {
|
||||
err := r.Respond(resp)
|
||||
if err != nil && resp != nil && resp.Body != nil {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
return fmt.Errorf("Error closing the response body: %v", err)
|
||||
}
|
||||
}
|
||||
return err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the
|
||||
// response Body into the value pointed to by v.
|
||||
func ByUnmarshallingJSON(v interface{}) RespondDecorator {
|
||||
return func(r Responder) Responder {
|
||||
return ResponderFunc(func(resp *http.Response) error {
|
||||
err := r.Respond(resp)
|
||||
if err == nil {
|
||||
b, errInner := ioutil.ReadAll(resp.Body)
|
||||
// Some responses might include a BOM; remove it for successful unmarshalling
|
||||
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
|
||||
if errInner != nil {
|
||||
err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
|
||||
} else if len(strings.Trim(string(b), " ")) > 0 {
|
||||
errInner = json.Unmarshal(b, v)
|
||||
if errInner != nil {
|
||||
err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b))
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ByUnmarshallingXML returns a RespondDecorator that decodes an XML document returned in the
|
||||
// response Body into the value pointed to by v.
|
||||
func ByUnmarshallingXML(v interface{}) RespondDecorator {
|
||||
return func(r Responder) Responder {
|
||||
return ResponderFunc(func(resp *http.Response) error {
|
||||
err := r.Respond(resp)
|
||||
if err == nil {
|
||||
b, errInner := ioutil.ReadAll(resp.Body)
|
||||
if errInner != nil {
|
||||
err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
|
||||
} else {
|
||||
errInner = xml.Unmarshal(b, v)
|
||||
if errInner != nil {
|
||||
err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b))
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response
|
||||
// StatusCode is among the set passed. On error, the response body is fully read into a buffer and
|
||||
// included in the returned error; the body is then restored so it remains readable on the response.
|
||||
func WithErrorUnlessStatusCode(codes ...int) RespondDecorator {
|
||||
return func(r Responder) Responder {
|
||||
return ResponderFunc(func(resp *http.Response) error {
|
||||
err := r.Respond(resp)
|
||||
if err == nil && !ResponseHasStatusCode(resp, codes...) {
|
||||
derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s",
|
||||
resp.Request.Method,
|
||||
resp.Request.URL,
|
||||
resp.Status)
|
||||
if resp.Body != nil {
|
||||
defer resp.Body.Close()
|
||||
b, _ := ioutil.ReadAll(resp.Body)
|
||||
derr.ServiceError = b
|
||||
resp.Body = ioutil.NopCloser(bytes.NewReader(b))
|
||||
}
|
||||
err = derr
|
||||
}
|
||||
return err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is
|
||||
// anything other than HTTP 200.
|
||||
func WithErrorUnlessOK() RespondDecorator {
|
||||
return WithErrorUnlessStatusCode(http.StatusOK)
|
||||
}
|
||||
|
||||
// ExtractHeader extracts all values of the specified header from the http.Response. It returns an
|
||||
// empty string slice if the passed http.Response is nil or the header does not exist.
|
||||
func ExtractHeader(header string, resp *http.Response) []string {
|
||||
if resp != nil && resp.Header != nil {
|
||||
return resp.Header[http.CanonicalHeaderKey(header)]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It
|
||||
// returns an empty string if the passed http.Response is nil or the header does not exist.
|
||||
func ExtractHeaderValue(header string, resp *http.Response) string {
|
||||
h := ExtractHeader(header, resp)
|
||||
if len(h) > 0 {
|
||||
return h[0]
|
||||
}
|
||||
return ""
|
||||
}
|
|
@@ -0,0 +1,270 @@
|
|||
package autorest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Sender is the interface that wraps the Do method to send HTTP requests.
|
||||
//
|
||||
// The standard http.Client conforms to this interface.
|
||||
type Sender interface {
|
||||
Do(*http.Request) (*http.Response, error)
|
||||
}
|
||||
|
||||
// SenderFunc is a method that implements the Sender interface.
|
||||
type SenderFunc func(*http.Request) (*http.Response, error)
|
||||
|
||||
// Do implements the Sender interface on SenderFunc.
|
||||
func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
|
||||
return sf(r)
|
||||
}
|
||||
|
||||
// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
|
||||
// http.Request and pass it along or, first, pass the http.Request along then react to the
|
||||
// http.Response result.
|
||||
type SendDecorator func(Sender) Sender
|
||||
|
||||
// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
|
||||
func CreateSender(decorators ...SendDecorator) Sender {
|
||||
return DecorateSender(&http.Client{}, decorators...)
|
||||
}
|
||||
|
||||
// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
|
||||
// the Sender. Decorators are applied in the order received, but their effect upon the request
|
||||
// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
|
||||
// post-decorator (pass the http.Request along and react to the results in http.Response).
|
||||
func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
|
||||
for _, decorate := range decorators {
|
||||
s = decorate(s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Send sends, by means of the default http.Client, the passed http.Request, returning the
|
||||
// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
|
||||
// it will apply to the http.Client before invoking the Do method.
|
||||
//
|
||||
// Send is a convenience method and not recommended for production. Advanced users should use
|
||||
// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
|
||||
//
|
||||
// Send will not poll or retry requests.
|
||||
func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
|
||||
return SendWithSender(&http.Client{}, r, decorators...)
|
||||
}
|
||||
|
||||
// SendWithSender sends the passed http.Request, through the provided Sender, returning the
|
||||
// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
|
||||
// it will apply to the passed Sender before invoking the Do method.
|
||||
//
|
||||
// SendWithSender will not poll or retry requests.
|
||||
func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
|
||||
return DecorateSender(s, decorators...).Do(r)
|
||||
}
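// Example (an illustrative sketch, not part of the vendored source): sending a
// request through a shared http.Client with logging and retry decorators.
// "client", "req", and "logger" are assumed to exist in the caller's scope.
//
//	resp, err := SendWithSender(client, req,
//		WithLogging(logger),
//		DoRetryForAttempts(3, time.Second),
//		DoErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))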
|
||||
|
||||
// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
|
||||
// invoking the Sender. The delay may be terminated by closing the optional channel on the
|
||||
// http.Request. If canceled, no further Senders are invoked.
|
||||
func AfterDelay(d time.Duration) SendDecorator {
|
||||
return func(s Sender) Sender {
|
||||
return SenderFunc(func(r *http.Request) (*http.Response, error) {
|
||||
if !DelayForBackoff(d, 0, r.Cancel) {
|
||||
return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay")
|
||||
}
|
||||
return s.Do(r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request.
|
||||
func AsIs() SendDecorator {
|
||||
return func(s Sender) Sender {
|
||||
return SenderFunc(func(r *http.Request) (*http.Response, error) {
|
||||
return s.Do(r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which
|
||||
// it closes the response if the passed Sender returns an error and the response body exists.
|
||||
func DoCloseIfError() SendDecorator {
|
||||
return func(s Sender) Sender {
|
||||
return SenderFunc(func(r *http.Request) (*http.Response, error) {
|
||||
resp, err := s.Do(r)
|
||||
if err != nil {
|
||||
Respond(resp, ByDiscardingBody(), ByClosing())
|
||||
}
|
||||
return resp, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is
|
||||
// among the set passed. Since these are artificial errors, the response body may still require
|
||||
// closing.
|
||||
func DoErrorIfStatusCode(codes ...int) SendDecorator {
|
||||
return func(s Sender) Sender {
|
||||
return SenderFunc(func(r *http.Request) (*http.Response, error) {
|
||||
resp, err := s.Do(r)
|
||||
if err == nil && ResponseHasStatusCode(resp, codes...) {
|
||||
err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s",
|
||||
resp.Request.Method,
|
||||
resp.Request.URL,
|
||||
resp.Status)
|
||||
}
|
||||
return resp, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response
|
||||
// StatusCode is among the set passed. Since these are artificial errors, the response body
|
||||
// may still require closing.
|
||||
func DoErrorUnlessStatusCode(codes ...int) SendDecorator {
|
||||
return func(s Sender) Sender {
|
||||
return SenderFunc(func(r *http.Request) (*http.Response, error) {
|
||||
resp, err := s.Do(r)
|
||||
if err == nil && !ResponseHasStatusCode(resp, codes...) {
|
||||
err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s",
|
||||
resp.Request.Method,
|
||||
resp.Request.URL,
|
||||
resp.Status)
|
||||
}
|
||||
return resp, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the
|
||||
// passed status codes. It expects the http.Response to contain a Location header providing the
|
||||
// URL at which to poll (using GET) and will poll until the elapsed time is equal to or greater than
|
||||
// the supplied duration. It will delay between requests for the duration specified in the
|
||||
// Retry-After header or, if the header is absent, the passed delay. Polling may be canceled by
|
||||
// closing the optional channel on the http.Request.
|
||||
func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator {
|
||||
return func(s Sender) Sender {
|
||||
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
|
||||
resp, err = s.Do(r)
|
||||
|
||||
if err == nil && ResponseHasStatusCode(resp, codes...) {
|
||||
r, err = NewPollingRequest(resp, r.Cancel)
|
||||
|
||||
for err == nil && ResponseHasStatusCode(resp, codes...) {
|
||||
Respond(resp,
|
||||
ByDiscardingBody(),
|
||||
ByClosing())
|
||||
resp, err = SendWithSender(s, r,
|
||||
AfterDelay(GetRetryAfter(resp, delay)))
|
||||
}
|
||||
}
|
||||
|
||||
return resp, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified
|
||||
// number of attempts, exponentially backing off between requests using the supplied backoff
|
||||
// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on
|
||||
// the http.Request.
|
||||
func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
|
||||
return func(s Sender) Sender {
|
||||
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
|
||||
for attempt := 0; attempt < attempts; attempt++ {
|
||||
resp, err = s.Do(r)
|
||||
if err == nil {
|
||||
return resp, err
|
||||
}
|
||||
DelayForBackoff(backoff, attempt, r.Cancel)
|
||||
}
|
||||
return resp, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// DoRetryForStatusCodes returns a SendDecorator that retries for the specified status codes for up to the specified
|
||||
// number of attempts, exponentially backing off between requests using the supplied backoff
|
||||
// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on
|
||||
// the http.Request.
|
||||
func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator {
|
||||
return func(s Sender) Sender {
|
||||
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
|
||||
b := []byte{}
|
||||
if r.Body != nil {
|
||||
b, err = ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
}
|
||||
|
||||
// Increment to add the first call (attempts denotes number of retries)
|
||||
attempts++
|
||||
for attempt := 0; attempt < attempts; attempt++ {
|
||||
r.Body = ioutil.NopCloser(bytes.NewBuffer(b))
|
||||
resp, err = s.Do(r)
|
||||
if err != nil || !ResponseHasStatusCode(resp, codes...) {
|
||||
return resp, err
|
||||
}
|
||||
DelayForBackoff(backoff, attempt, r.Cancel)
|
||||
}
|
||||
return resp, err
|
||||
})
|
||||
}
|
||||
}
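// Example (an illustrative sketch, not part of the vendored source): retrying
// throttled or temporarily unavailable requests; "client" and "req" are assumed
// to exist in the caller's scope.
//
//	resp, err := SendWithSender(client, req,
//		DoRetryForStatusCodes(5, 2*time.Second,
//			http.StatusTooManyRequests, http.StatusServiceUnavailable))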
|
||||
|
||||
// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal
|
||||
// to or greater than the specified duration, exponentially backing off between requests using the
|
||||
// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the
|
||||
// optional channel on the http.Request.
|
||||
func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
|
||||
return func(s Sender) Sender {
|
||||
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
|
||||
end := time.Now().Add(d)
|
||||
for attempt := 0; time.Now().Before(end); attempt++ {
|
||||
resp, err = s.Do(r)
|
||||
if err == nil {
|
||||
return resp, err
|
||||
}
|
||||
DelayForBackoff(backoff, attempt, r.Cancel)
|
||||
}
|
||||
return resp, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// WithLogging returns a SendDecorator that implements simple before and after logging of the
|
||||
// request.
|
||||
func WithLogging(logger *log.Logger) SendDecorator {
|
||||
return func(s Sender) Sender {
|
||||
return SenderFunc(func(r *http.Request) (*http.Response, error) {
|
||||
logger.Printf("Sending %s %s", r.Method, r.URL)
|
||||
resp, err := s.Do(r)
|
||||
if err != nil {
|
||||
logger.Printf("%s %s received error '%v'", r.Method, r.URL, err)
|
||||
} else {
|
||||
logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status)
|
||||
}
|
||||
return resp, err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of
|
||||
// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be set
|
||||
// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early,
|
||||
// returns false.
|
||||
// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
|
||||
// count.
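// For example, with backoff = 2 * time.Second the computed delays are:
//	attempt 0 -> 2s, attempt 1 -> 4s, attempt 2 -> 8s, attempt 3 -> 16s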
|
||||
func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool {
|
||||
select {
|
||||
case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second):
|
||||
return true
|
||||
case <-cancel:
|
||||
return false
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,178 @@
|
|||
package autorest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// EncodedAs is a series of constants specifying various data encodings
|
||||
type EncodedAs string
|
||||
|
||||
const (
|
||||
// EncodedAsJSON states that data is encoded as JSON
|
||||
EncodedAsJSON EncodedAs = "JSON"
|
||||
|
||||
// EncodedAsXML states that data is encoded as XML
|
||||
EncodedAsXML EncodedAs = "XML"
|
||||
)
|
||||
|
||||
// Decoder defines the decoding method json.Decoder and xml.Decoder share
|
||||
type Decoder interface {
|
||||
Decode(v interface{}) error
|
||||
}
|
||||
|
||||
// NewDecoder creates a new decoder appropriate to the passed encoding.
|
||||
// encodedAs specifies the type of encoding and r supplies the io.Reader containing the
|
||||
// encoded data.
|
||||
func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder {
|
||||
if encodedAs == EncodedAsJSON {
|
||||
return json.NewDecoder(r)
|
||||
} else if encodedAs == EncodedAsXML {
|
||||
return xml.NewDecoder(r)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy
|
||||
// is especially useful if there is a chance the data will fail to decode.
|
||||
// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v
|
||||
// is the decoding destination.
|
||||
func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) {
|
||||
b := bytes.Buffer{}
|
||||
return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v)
|
||||
}
|
||||
|
||||
// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc.
|
||||
// It utilizes io.TeeReader to copy the data read and has the same behavior when reading.
|
||||
// Further, when it is closed, it ensures that rc is closed as well.
|
||||
func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser {
|
||||
return &teeReadCloser{rc, io.TeeReader(rc, w)}
|
||||
}
|
||||
|
||||
type teeReadCloser struct {
|
||||
rc io.ReadCloser
|
||||
r io.Reader
|
||||
}
|
||||
|
||||
func (t *teeReadCloser) Read(p []byte) (int, error) {
|
||||
return t.r.Read(p)
|
||||
}
|
||||
|
||||
func (t *teeReadCloser) Close() error {
|
||||
return t.rc.Close()
|
||||
}
|
||||
|
||||
func containsInt(ints []int, n int) bool {
|
||||
for _, i := range ints {
|
||||
if i == n {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func escapeValueStrings(m map[string]string) map[string]string {
|
||||
for key, value := range m {
|
||||
m[key] = url.QueryEscape(value)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string {
|
||||
mapOfStrings := make(map[string]string)
|
||||
for key, value := range mapOfInterface {
|
||||
mapOfStrings[key] = ensureValueString(value)
|
||||
}
|
||||
return mapOfStrings
|
||||
}
|
||||
|
||||
func ensureValueString(value interface{}) string {
|
||||
if value == nil {
|
||||
return ""
|
||||
}
|
||||
switch v := value.(type) {
|
||||
case string:
|
||||
return v
|
||||
case []byte:
|
||||
return string(v)
|
||||
default:
|
||||
return fmt.Sprintf("%v", v)
|
||||
}
|
||||
}
|
||||
|
||||
// MapToValues converts a map[string]interface{} to url.Values.
|
||||
func MapToValues(m map[string]interface{}) url.Values {
|
||||
v := url.Values{}
|
||||
for key, value := range m {
|
||||
x := reflect.ValueOf(value)
|
||||
if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
|
||||
for i := 0; i < x.Len(); i++ {
|
||||
v.Add(key, ensureValueString(x.Index(i)))
|
||||
}
|
||||
} else {
|
||||
v.Add(key, ensureValueString(value))
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// String converts interface v to a string. If v is a list, it
|
||||
// joins list elements using separator.
|
||||
func String(v interface{}, sep ...string) string {
|
||||
if len(sep) > 0 {
|
||||
return ensureValueString(strings.Join(v.([]string), sep[0]))
|
||||
}
|
||||
return ensureValueString(v)
|
||||
}
|
||||
|
||||
// Encode encodes URL path and query parameters.
|
||||
func Encode(location string, v interface{}, sep ...string) string {
|
||||
s := String(v, sep...)
|
||||
switch strings.ToLower(location) {
|
||||
case "path":
|
||||
return pathEscape(s)
|
||||
case "query":
|
||||
return queryEscape(s)
|
||||
default:
|
||||
return s
|
||||
}
|
||||
}
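// For example (illustrative), Encode("path", "a b") yields "a%20b" because
// pathEscape replaces "+" with "%20", while Encode("query", "a b") yields "a+b"
// via url.QueryEscape.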
|
||||
|
||||
func pathEscape(s string) string {
|
||||
return strings.Replace(url.QueryEscape(s), "+", "%20", -1)
|
||||
}
|
||||
|
||||
func queryEscape(s string) string {
|
||||
return url.QueryEscape(s)
|
||||
}
|
||||
|
||||
// This method is the same as the Encode() method of the "net/url" package,
|
||||
// except it does not encode the query parameters because they
|
||||
// already come encoded. It formats the values map in query format (bar=foo&a=b).
|
||||
func createQuery(v url.Values) string {
|
||||
var buf bytes.Buffer
|
||||
keys := make([]string, 0, len(v))
|
||||
for k := range v {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
for _, k := range keys {
|
||||
vs := v[k]
|
||||
prefix := url.QueryEscape(k) + "="
|
||||
for _, v := range vs {
|
||||
if buf.Len() > 0 {
|
||||
buf.WriteByte('&')
|
||||
}
|
||||
buf.WriteString(prefix)
|
||||
buf.WriteString(v)
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
|
@@ -0,0 +1,23 @@
|
|||
package autorest
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const (
|
||||
major = "7"
|
||||
minor = "3"
|
||||
patch = "0"
|
||||
tag = ""
|
||||
semVerFormat = "%s.%s.%s%s"
|
||||
)
|
||||
|
||||
var version string
|
||||
|
||||
// Version returns the semantic version (see http://semver.org).
|
||||
func Version() string {
|
||||
if version == "" {
|
||||
version = fmt.Sprintf(semVerFormat, major, minor, patch, tag)
|
||||
}
|
||||
return version
|
||||
}
|
|
@@ -0,0 +1,8 @@
|
|||
Copyright (c) 2012 Dave Grijalva
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
|
@@ -0,0 +1,97 @@
|
|||
## Migration Guide from v2 -> v3
|
||||
|
||||
Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code.
|
||||
|
||||
### `Token.Claims` is now an interface type
|
||||
|
||||
The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`.
|
||||
|
||||
`MapClaims` is an alias for `map[string]interface{}` with built-in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except that you must type-assert the claims property.
|
||||
|
||||
The old example for parsing a token looked like this:
|
||||
|
||||
```go
|
||||
if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
|
||||
fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
|
||||
}
|
||||
```
|
||||
|
||||
is now directly mapped to...
|
||||
|
||||
```go
|
||||
if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
|
||||
claims := token.Claims.(jwt.MapClaims)
|
||||
fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
|
||||
}
|
||||
```
|
||||
|
||||
`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type.
|
||||
|
||||
```go
|
||||
type MyCustomClaims struct {
|
||||
User string
|
||||
*StandardClaims
|
||||
}
|
||||
|
||||
if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil {
|
||||
claims := token.Claims.(*MyCustomClaims)
|
||||
fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt)
|
||||
}
|
||||
```
|
||||
|
||||
### `ParseFromRequest` has been moved
|
||||
|
||||
To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`.
|
||||
|
||||
`Extractors` do the work of picking the token string out of a request. The interface is simple and composable.
|
||||
|
||||
This simple parsing example:
|
||||
|
||||
```go
|
||||
if token, err := jwt.ParseFromRequest(req, keyLookupFunc); err == nil {
|
||||
fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
|
||||
}
|
||||
```
|
||||
|
||||
is directly mapped to:
|
||||
|
||||
```go
|
||||
if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
|
||||
claims := token.Claims.(jwt.MapClaims)
|
||||
fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
|
||||
}
|
||||
```
|
||||
|
||||
There are several concrete `Extractor` types provided for your convenience:
|
||||
|
||||
* `HeaderExtractor` will search a list of headers until one contains content.
|
||||
* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content.
|
||||
* `MultiExtractor` will try a list of `Extractors` in order until one returns content.
|
||||
* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token.
|
||||
* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument
|
||||
* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header
|
||||
|
||||
|
||||
### RSA signing methods no longer accept `[]byte` keys
|
||||
|
||||
Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse.
|
||||
|
||||
To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types.
|
||||
|
||||
```go
|
||||
func keyLookupFunc(token *jwt.Token) (interface{}, error) {
|
||||
// Don't forget to validate the alg is what you expect:
|
||||
if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
|
||||
return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
|
||||
}
|
||||
|
||||
// Look up key
|
||||
key, err := lookupPublicKey(token.Header["kid"])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Unpack key from PEM encoded PKCS8
|
||||
return jwt.ParseRSAPublicKeyFromPEM(key)
|
||||
}
|
||||
```
|
|
@@ -0,0 +1,85 @@
|
|||
A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
|
||||
|
||||
[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
|
||||
|
||||
**BREAKING CHANGES:** Version 3.0.0 is here. It includes _a lot_ of changes including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
|
||||
|
||||
**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is affected. There will be an update soon to remedy this, and it will likely require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect.
|
||||
|
||||
|
||||
## What the heck is a JWT?
|
||||
|
||||
JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
|
||||
|
||||
In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
|
||||
|
||||
The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which signing method was used and which key was used.
|
||||
|
||||
The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
|
||||
|
||||
## What's in the box?
|
||||
|
||||
This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
|
||||
|
||||
## Examples
|
||||
|
||||
See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
|
||||
|
||||
* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
|
||||
* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
|
||||
* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
|
||||
|
||||
## Extensions
|
||||
|
||||
This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
|
||||
|
||||
Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go
|
||||
|
||||
## Compliance
|
||||
|
||||
This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
|
||||
|
||||
* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
|
||||
|
||||
## Project Status & Versioning
|
||||
|
||||
This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
|
||||
|
||||
This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
|
||||
|
||||
While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package import instead: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing with respect to semantic versioning.
|
||||
|
||||
## Usage Tips
|
||||
|
||||
### Signing vs Encryption
|
||||
|
||||
A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
|
||||
|
||||
* The author of the token was in possession of the signing secret
|
||||
* The data has not been modified since it was signed
|
||||
|
||||
It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
|
||||
|
||||
### Choosing a Signing Method
|
||||
|
||||
There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
|
||||
|
||||
Symmetric signing methods, such as HMAC-SHA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. It is also slightly faster computationally, though this rarely matters. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
|
||||
|
||||
Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
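As a quick, hedged sketch of the symmetric case (it assumes the `NewWithClaims` and `SignedString` helpers mentioned elsewhere in this document; the claims and secret below are placeholders):

```go
token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
	"user": "alice",
	"exp":  time.Now().Add(time.Hour).Unix(),
})
signedString, err := token.SignedString([]byte("my-shared-secret"))
```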
|
||||
|
||||
### JWT and OAuth
|
||||
|
||||
It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
|
||||
|
||||
Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
|
||||
|
||||
* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
|
||||
* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
|
||||
* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
|
||||
|
||||
## More
|
||||
|
||||
Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
|
||||
|
||||
The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
|
|
@@ -0,0 +1,105 @@
|
|||
## `jwt-go` Version History
|
||||
|
||||
#### 3.0.0
|
||||
|
||||
* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
|
||||
* Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
|
||||
* `ParseFromRequest` has been moved to `request` subpackage and usage has changed
|
||||
* The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
|
||||
* Other Additions and Changes
|
||||
* Added `Claims` interface type to allow users to decode the claims into a custom type
|
||||
* Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
|
||||
* Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
|
||||
* Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
|
||||
* Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
|
||||
* Added several new, more specific, validation errors to error type bitmask
|
||||
* Moved examples from README to executable example files
|
||||
* Signing method registry is now thread safe
|
||||
* Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
|
||||
|
||||
#### 2.7.0
|
||||
|
||||
This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
|
||||
|
||||
* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
|
||||
* Error text for expired tokens includes how long it's been expired
|
||||
* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
|
||||
* Documentation updates
|
||||
|
||||
#### 2.6.0
|
||||
|
||||
* Exposed inner error within ValidationError
|
||||
* Fixed validation errors when using UseJSONNumber flag
|
||||
* Added several unit tests
|
||||
|
||||
#### 2.5.0
|
||||
|
||||
* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
|
||||
* Updated/fixed some documentation
|
||||
* Added more helpful error message when trying to parse tokens that begin with `BEARER `
|
||||
|
||||
#### 2.4.0
|
||||
|
||||
* Added new type, Parser, to allow for configuration of various parsing parameters
|
||||
* You can now specify a list of valid signing methods. Anything outside this set will be rejected.
|
||||
* You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
|
||||
* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
|
||||
* Fixed some bugs with ECDSA parsing
|
||||
|
||||
#### 2.3.0
|
||||
|
||||
* Added support for ECDSA signing methods
|
||||
* Added support for RSA PSS signing methods (requires go v1.4)
|
||||
|
||||
#### 2.2.0
|
||||
|
||||
* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
|
||||
|
||||
#### 2.1.0
|
||||
|
||||
Backwards compatible API change that was missed in 2.0.0.
|
||||
|
||||
* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
|
||||
|
||||
#### 2.0.0
|
||||
|
||||
There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
|
||||
|
||||
The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
|
||||
|
||||
It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
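Concretely, the migration usually amounts to widening the return type of the key callback; `lookupKey` below is a hypothetical stand-in for an application's own key storage.

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

// lookupKey is a hypothetical stand-in for however an application resolves keys.
// Returning interface{} (rather than the pre-2.0.0 []byte) lets it hand back
// []byte for HMAC or *rsa.PublicKey / *ecdsa.PublicKey for asymmetric methods.
func lookupKey(kid interface{}) (interface{}, error) {
	return []byte("placeholder-secret"), nil
}

func main() {
	// Sign something so there is a token to parse.
	signed, _ := jwt.New(jwt.SigningMethodHS256).SignedString([]byte("placeholder-secret"))

	token, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return lookupKey(t.Header["kid"])
	})
	fmt.Println(token.Valid, err) // true <nil>
}
```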
|
||||
|
||||
* **Compatibility Breaking Changes**
|
||||
* `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
|
||||
* `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
|
||||
* `KeyFunc` now returns `interface{}` instead of `[]byte`
|
||||
* `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
|
||||
* `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
|
||||
* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
|
||||
* Added public package global `SigningMethodHS256`
|
||||
* Added public package global `SigningMethodHS384`
|
||||
* Added public package global `SigningMethodHS512`
|
||||
* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
|
||||
* Added public package global `SigningMethodRS256`
|
||||
* Added public package global `SigningMethodRS384`
|
||||
* Added public package global `SigningMethodRS512`
|
||||
* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
|
||||
* Refactored the RSA implementation to be easier to read
|
||||
* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
|
||||
|
||||
#### 1.0.2
|
||||
|
||||
* Fixed bug in parsing public keys from certificates
|
||||
* Added more tests around the parsing of keys for RS256
|
||||
* Code refactoring in RS256 implementation. No functional changes
|
||||
|
||||
#### 1.0.1
|
||||
|
||||
* Fixed panic if RS256 signing method was passed an invalid key
|
||||
|
||||
#### 1.0.0
|
||||
|
||||
* First versioned release
|
||||
* API stabilized
|
||||
* Supports creating, signing, parsing, and validating JWT tokens
|
||||
* Supports RS256 and HS256 signing methods
|
|
@ -0,0 +1,134 @@
|
|||
package jwt
|
||||
|
||||
import (
|
||||
"crypto/subtle"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// For a type to be a Claims object, it must just have a Valid method that determines
|
||||
// if the token is invalid for any supported reason
|
||||
type Claims interface {
|
||||
Valid() error
|
||||
}
|
||||
|
||||
// Structured version of Claims Section, as referenced at
|
||||
// https://tools.ietf.org/html/rfc7519#section-4.1
|
||||
// See examples for how to use this with your own claim types
|
||||
type StandardClaims struct {
|
||||
Audience string `json:"aud,omitempty"`
|
||||
ExpiresAt int64 `json:"exp,omitempty"`
|
||||
Id string `json:"jti,omitempty"`
|
||||
IssuedAt int64 `json:"iat,omitempty"`
|
||||
Issuer string `json:"iss,omitempty"`
|
||||
NotBefore int64 `json:"nbf,omitempty"`
|
||||
Subject string `json:"sub,omitempty"`
|
||||
}
|
||||
|
||||
// Validates time based claims "exp, iat, nbf".
|
||||
// There is no accounting for clock skew.
|
||||
// As well, if any of the above claims are not in the token, it will still
|
||||
// be considered a valid claim.
|
||||
func (c StandardClaims) Valid() error {
|
||||
vErr := new(ValidationError)
|
||||
now := TimeFunc().Unix()
|
||||
|
||||
// The claims below are optional, by default, so if they are set to the
|
||||
// default value in Go, let's not fail the verification for them.
|
||||
if c.VerifyExpiresAt(now, false) == false {
|
||||
delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
|
||||
vErr.Inner = fmt.Errorf("token is expired by %v", delta)
|
||||
vErr.Errors |= ValidationErrorExpired
|
||||
}
|
||||
|
||||
if c.VerifyIssuedAt(now, false) == false {
|
||||
vErr.Inner = fmt.Errorf("Token used before issued")
|
||||
vErr.Errors |= ValidationErrorIssuedAt
|
||||
}
|
||||
|
||||
if c.VerifyNotBefore(now, false) == false {
|
||||
vErr.Inner = fmt.Errorf("token is not valid yet")
|
||||
vErr.Errors |= ValidationErrorNotValidYet
|
||||
}
|
||||
|
||||
if vErr.valid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return vErr
|
||||
}
|
||||
|
||||
// Compares the aud claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
|
||||
return verifyAud(c.Audience, cmp, req)
|
||||
}
|
||||
|
||||
// Compares the exp claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
|
||||
return verifyExp(c.ExpiresAt, cmp, req)
|
||||
}
|
||||
|
||||
// Compares the iat claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
|
||||
return verifyIat(c.IssuedAt, cmp, req)
|
||||
}
|
||||
|
||||
// Compares the iss claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
|
||||
return verifyIss(c.Issuer, cmp, req)
|
||||
}
|
||||
|
||||
// Compares the nbf claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
|
||||
return verifyNbf(c.NotBefore, cmp, req)
|
||||
}
|
||||
|
||||
// ----- helpers
|
||||
|
||||
func verifyAud(aud string, cmp string, required bool) bool {
|
||||
if aud == "" {
|
||||
return !required
|
||||
}
|
||||
if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 {
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func verifyExp(exp int64, now int64, required bool) bool {
|
||||
if exp == 0 {
|
||||
return !required
|
||||
}
|
||||
return now <= exp
|
||||
}
|
||||
|
||||
func verifyIat(iat int64, now int64, required bool) bool {
|
||||
if iat == 0 {
|
||||
return !required
|
||||
}
|
||||
return now >= iat
|
||||
}
|
||||
|
||||
func verifyIss(iss string, cmp string, required bool) bool {
|
||||
if iss == "" {
|
||||
return !required
|
||||
}
|
||||
if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 {
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func verifyNbf(nbf int64, now int64, required bool) bool {
|
||||
if nbf == 0 {
|
||||
return !required
|
||||
}
|
||||
return now >= nbf
|
||||
}
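A small sketch of how these validation helpers behave; the issuer and timestamps are illustrative only. Constructing claims whose `exp` is already in the past makes `Valid` report `ValidationErrorExpired`:

```go
package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// Claims whose exp is already one hour in the past.
	claims := jwt.StandardClaims{
		Issuer:    "example",
		ExpiresAt: time.Now().Add(-time.Hour).Unix(),
	}

	err := claims.Valid()
	if vErr, ok := err.(*jwt.ValidationError); ok {
		fmt.Println(vErr.Errors&jwt.ValidationErrorExpired != 0) // true
	}
}
```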
|
|
@ -0,0 +1,4 @@
|
|||
// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
|
||||
//
|
||||
// See README.md for more info.
|
||||
package jwt
|
|
@ -0,0 +1,147 @@
|
|||
package jwt
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rand"
|
||||
"errors"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
var (
|
||||
// Sadly this is missing from crypto/ecdsa compared to crypto/rsa
|
||||
ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
|
||||
)
|
||||
|
||||
// Implements the ECDSA family of signing methods
|
||||
type SigningMethodECDSA struct {
|
||||
Name string
|
||||
Hash crypto.Hash
|
||||
KeySize int
|
||||
CurveBits int
|
||||
}
|
||||
|
||||
// Specific instances for EC256 and company
|
||||
var (
|
||||
SigningMethodES256 *SigningMethodECDSA
|
||||
SigningMethodES384 *SigningMethodECDSA
|
||||
SigningMethodES512 *SigningMethodECDSA
|
||||
)
|
||||
|
||||
func init() {
|
||||
// ES256
|
||||
SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
|
||||
RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
|
||||
return SigningMethodES256
|
||||
})
|
||||
|
||||
// ES384
|
||||
SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
|
||||
RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
|
||||
return SigningMethodES384
|
||||
})
|
||||
|
||||
// ES512
|
||||
SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
|
||||
RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
|
||||
return SigningMethodES512
|
||||
})
|
||||
}
|
||||
|
||||
func (m *SigningMethodECDSA) Alg() string {
|
||||
return m.Name
|
||||
}
|
||||
|
||||
// Implements the Verify method from SigningMethod
|
||||
// For this verify method, key must be an ecdsa.PublicKey struct
|
||||
func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
|
||||
var err error
|
||||
|
||||
// Decode the signature
|
||||
var sig []byte
|
||||
if sig, err = DecodeSegment(signature); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the key
|
||||
var ecdsaKey *ecdsa.PublicKey
|
||||
switch k := key.(type) {
|
||||
case *ecdsa.PublicKey:
|
||||
ecdsaKey = k
|
||||
default:
|
||||
return ErrInvalidKeyType
|
||||
}
|
||||
|
||||
if len(sig) != 2*m.KeySize {
|
||||
return ErrECDSAVerification
|
||||
}
|
||||
|
||||
r := big.NewInt(0).SetBytes(sig[:m.KeySize])
|
||||
s := big.NewInt(0).SetBytes(sig[m.KeySize:])
|
||||
|
||||
// Create hasher
|
||||
if !m.Hash.Available() {
|
||||
return ErrHashUnavailable
|
||||
}
|
||||
hasher := m.Hash.New()
|
||||
hasher.Write([]byte(signingString))
|
||||
|
||||
// Verify the signature
|
||||
if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true {
|
||||
return nil
|
||||
} else {
|
||||
return ErrECDSAVerification
|
||||
}
|
||||
}
|
||||
|
||||
// Implements the Sign method from SigningMethod
|
||||
// For this signing method, key must be an ecdsa.PrivateKey struct
|
||||
func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
|
||||
// Get the key
|
||||
var ecdsaKey *ecdsa.PrivateKey
|
||||
switch k := key.(type) {
|
||||
case *ecdsa.PrivateKey:
|
||||
ecdsaKey = k
|
||||
default:
|
||||
return "", ErrInvalidKeyType
|
||||
}
|
||||
|
||||
// Create the hasher
|
||||
if !m.Hash.Available() {
|
||||
return "", ErrHashUnavailable
|
||||
}
|
||||
|
||||
hasher := m.Hash.New()
|
||||
hasher.Write([]byte(signingString))
|
||||
|
||||
// Sign the string and return r, s
|
||||
if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
|
||||
curveBits := ecdsaKey.Curve.Params().BitSize
|
||||
|
||||
if m.CurveBits != curveBits {
|
||||
return "", ErrInvalidKey
|
||||
}
|
||||
|
||||
keyBytes := curveBits / 8
|
||||
if curveBits%8 > 0 {
|
||||
keyBytes += 1
|
||||
}
|
||||
|
||||
// We serialize the outputs (r and s) into big-endian byte arrays and pad
|
||||
// them with zeros on the left to make sure the sizes work out. Both arrays
|
||||
// must be keyBytes long, and the output must be 2*keyBytes long.
|
||||
rBytes := r.Bytes()
|
||||
rBytesPadded := make([]byte, keyBytes)
|
||||
copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
|
||||
|
||||
sBytes := s.Bytes()
|
||||
sBytesPadded := make([]byte, keyBytes)
|
||||
copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
|
||||
|
||||
out := append(rBytesPadded, sBytesPadded...)
|
||||
|
||||
return EncodeSegment(out), nil
|
||||
} else {
|
||||
return "", err
|
||||
}
|
||||
}
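A minimal sketch of the ES256 round trip this file implements, using a freshly generated P-256 key rather than any key from the repository:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// ES256 expects a P-256 key; Sign rejects keys whose curve size doesn't match.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	signed, err := jwt.NewWithClaims(jwt.SigningMethodES256, jwt.MapClaims{"sub": "demo"}).
		SignedString(key) // signing takes the *ecdsa.PrivateKey
	if err != nil {
		log.Fatal(err)
	}

	token, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return &key.PublicKey, nil // verification takes the *ecdsa.PublicKey
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(token.Valid)
}
```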
|
|
@ -0,0 +1,67 @@
|
|||
package jwt
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key")
|
||||
ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key")
|
||||
)
|
||||
|
||||
// Parse PEM encoded Elliptic Curve Private Key Structure
|
||||
func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
|
||||
var err error
|
||||
|
||||
// Parse PEM block
|
||||
var block *pem.Block
|
||||
if block, _ = pem.Decode(key); block == nil {
|
||||
return nil, ErrKeyMustBePEMEncoded
|
||||
}
|
||||
|
||||
// Parse the key
|
||||
var parsedKey interface{}
|
||||
if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var pkey *ecdsa.PrivateKey
|
||||
var ok bool
|
||||
if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
|
||||
return nil, ErrNotECPrivateKey
|
||||
}
|
||||
|
||||
return pkey, nil
|
||||
}
|
||||
|
||||
// Parse PEM encoded PKCS1 or PKCS8 public key
|
||||
func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
|
||||
var err error
|
||||
|
||||
// Parse PEM block
|
||||
var block *pem.Block
|
||||
if block, _ = pem.Decode(key); block == nil {
|
||||
return nil, ErrKeyMustBePEMEncoded
|
||||
}
|
||||
|
||||
// Parse the key
|
||||
var parsedKey interface{}
|
||||
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
|
||||
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
|
||||
parsedKey = cert.PublicKey
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var pkey *ecdsa.PublicKey
|
||||
var ok bool
|
||||
if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
|
||||
return nil, ErrNotECPublicKey
|
||||
}
|
||||
|
||||
return pkey, nil
|
||||
}
|
|
@ -0,0 +1,59 @@
|
|||
package jwt
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Error constants
|
||||
var (
|
||||
ErrInvalidKey = errors.New("key is invalid")
|
||||
ErrInvalidKeyType = errors.New("key is of invalid type")
|
||||
ErrHashUnavailable = errors.New("the requested hash function is unavailable")
|
||||
)
|
||||
|
||||
// The errors that might occur when parsing and validating a token
|
||||
const (
|
||||
ValidationErrorMalformed uint32 = 1 << iota // Token is malformed
|
||||
ValidationErrorUnverifiable // Token could not be verified because of signing problems
|
||||
ValidationErrorSignatureInvalid // Signature validation failed
|
||||
|
||||
// Standard Claim validation errors
|
||||
ValidationErrorAudience // AUD validation failed
|
||||
ValidationErrorExpired // EXP validation failed
|
||||
ValidationErrorIssuedAt // IAT validation failed
|
||||
ValidationErrorIssuer // ISS validation failed
|
||||
ValidationErrorNotValidYet // NBF validation failed
|
||||
ValidationErrorId // JTI validation failed
|
||||
ValidationErrorClaimsInvalid // Generic claims validation error
|
||||
)
|
||||
|
||||
// Helper for constructing a ValidationError with a string error message
|
||||
func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
|
||||
return &ValidationError{
|
||||
text: errorText,
|
||||
Errors: errorFlags,
|
||||
}
|
||||
}
|
||||
|
||||
// The error from Parse if token is not valid
|
||||
type ValidationError struct {
|
||||
Inner error // stores the error returned by external dependencies, i.e.: KeyFunc
|
||||
Errors uint32 // bitfield. see ValidationError... constants
|
||||
text string // errors that do not have a valid error just have text
|
||||
}
|
||||
|
||||
// Validation error is an error type
|
||||
func (e ValidationError) Error() string {
|
||||
if e.Inner != nil {
|
||||
return e.Inner.Error()
|
||||
} else if e.text != "" {
|
||||
return e.text
|
||||
} else {
|
||||
return "token is invalid"
|
||||
}
|
||||
}
|
||||
|
||||
// No errors
|
||||
func (e *ValidationError) valid() bool {
|
||||
return e.Errors == 0
|
||||
}
|
|
@ -0,0 +1,94 @@
|
|||
package jwt
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/hmac"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Implements the HMAC-SHA family of signing methods
|
||||
type SigningMethodHMAC struct {
|
||||
Name string
|
||||
Hash crypto.Hash
|
||||
}
|
||||
|
||||
// Specific instances for HS256 and company
|
||||
var (
|
||||
SigningMethodHS256 *SigningMethodHMAC
|
||||
SigningMethodHS384 *SigningMethodHMAC
|
||||
SigningMethodHS512 *SigningMethodHMAC
|
||||
ErrSignatureInvalid = errors.New("signature is invalid")
|
||||
)
|
||||
|
||||
func init() {
|
||||
// HS256
|
||||
SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
|
||||
RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
|
||||
return SigningMethodHS256
|
||||
})
|
||||
|
||||
// HS384
|
||||
SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
|
||||
RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
|
||||
return SigningMethodHS384
|
||||
})
|
||||
|
||||
// HS512
|
||||
SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
|
||||
RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
|
||||
return SigningMethodHS512
|
||||
})
|
||||
}
|
||||
|
||||
func (m *SigningMethodHMAC) Alg() string {
|
||||
return m.Name
|
||||
}
|
||||
|
||||
// Verify the signature of HSXXX tokens. Returns nil if the signature is valid.
|
||||
func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
|
||||
// Verify the key is the right type
|
||||
keyBytes, ok := key.([]byte)
|
||||
if !ok {
|
||||
return ErrInvalidKeyType
|
||||
}
|
||||
|
||||
// Decode signature, for comparison
|
||||
sig, err := DecodeSegment(signature)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Can we use the specified hashing method?
|
||||
if !m.Hash.Available() {
|
||||
return ErrHashUnavailable
|
||||
}
|
||||
|
||||
// This signing method is symmetric, so we validate the signature
|
||||
// by reproducing the signature from the signing string and key, then
|
||||
// comparing that against the provided signature.
|
||||
hasher := hmac.New(m.Hash.New, keyBytes)
|
||||
hasher.Write([]byte(signingString))
|
||||
if !hmac.Equal(sig, hasher.Sum(nil)) {
|
||||
return ErrSignatureInvalid
|
||||
}
|
||||
|
||||
// No validation errors. Signature is good.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Implements the Sign method from SigningMethod for this signing method.
|
||||
// Key must be []byte
|
||||
func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
|
||||
if keyBytes, ok := key.([]byte); ok {
|
||||
if !m.Hash.Available() {
|
||||
return "", ErrHashUnavailable
|
||||
}
|
||||
|
||||
hasher := hmac.New(m.Hash.New, keyBytes)
|
||||
hasher.Write([]byte(signingString))
|
||||
|
||||
return EncodeSegment(hasher.Sum(nil)), nil
|
||||
}
|
||||
|
||||
return "", ErrInvalidKey
|
||||
}
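A short sketch of the symmetric verification described in the comments above; both secrets are placeholders. The same `[]byte` key must be used on both sides, and a mismatched key surfaces as `ErrSignatureInvalid`:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	rightKey := []byte("correct-secret")
	wrongKey := []byte("other-secret")

	signed, err := jwt.New(jwt.SigningMethodHS256).SignedString(rightKey)
	if err != nil {
		log.Fatal(err)
	}

	// Verification recomputes the HMAC over the signing string (header.claims)
	// and compares it against the supplied signature with hmac.Equal.
	parts := strings.Split(signed, ".")
	signingString := strings.Join(parts[0:2], ".")

	fmt.Println(jwt.SigningMethodHS256.Verify(signingString, parts[2], rightKey)) // <nil>
	fmt.Println(jwt.SigningMethodHS256.Verify(signingString, parts[2], wrongKey)) // signature is invalid
}
```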
|
|
@ -0,0 +1,94 @@
|
|||
package jwt
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
// "fmt"
|
||||
)
|
||||
|
||||
// Claims type that uses the map[string]interface{} for JSON decoding
|
||||
// This is the default claims type if you don't supply one
|
||||
type MapClaims map[string]interface{}
|
||||
|
||||
// Compares the aud claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
|
||||
aud, _ := m["aud"].(string)
|
||||
return verifyAud(aud, cmp, req)
|
||||
}
|
||||
|
||||
// Compares the exp claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
|
||||
switch exp := m["exp"].(type) {
|
||||
case float64:
|
||||
return verifyExp(int64(exp), cmp, req)
|
||||
case json.Number:
|
||||
v, _ := exp.Int64()
|
||||
return verifyExp(v, cmp, req)
|
||||
}
|
||||
return req == false
|
||||
}
|
||||
|
||||
// Compares the iat claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
|
||||
switch iat := m["iat"].(type) {
|
||||
case float64:
|
||||
return verifyIat(int64(iat), cmp, req)
|
||||
case json.Number:
|
||||
v, _ := iat.Int64()
|
||||
return verifyIat(v, cmp, req)
|
||||
}
|
||||
return req == false
|
||||
}
|
||||
|
||||
// Compares the iss claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
|
||||
iss, _ := m["iss"].(string)
|
||||
return verifyIss(iss, cmp, req)
|
||||
}
|
||||
|
||||
// Compares the nbf claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
|
||||
switch nbf := m["nbf"].(type) {
|
||||
case float64:
|
||||
return verifyNbf(int64(nbf), cmp, req)
|
||||
case json.Number:
|
||||
v, _ := nbf.Int64()
|
||||
return verifyNbf(v, cmp, req)
|
||||
}
|
||||
return req == false
|
||||
}
|
||||
|
||||
// Validates time based claims "exp, iat, nbf".
|
||||
// There is no accounting for clock skew.
|
||||
// As well, if any of the above claims are not in the token, it will still
|
||||
// be considered a valid claim.
|
||||
func (m MapClaims) Valid() error {
|
||||
vErr := new(ValidationError)
|
||||
now := TimeFunc().Unix()
|
||||
|
||||
if m.VerifyExpiresAt(now, false) == false {
|
||||
vErr.Inner = errors.New("Token is expired")
|
||||
vErr.Errors |= ValidationErrorExpired
|
||||
}
|
||||
|
||||
if m.VerifyIssuedAt(now, false) == false {
|
||||
vErr.Inner = errors.New("Token used before issued")
|
||||
vErr.Errors |= ValidationErrorIssuedAt
|
||||
}
|
||||
|
||||
if m.VerifyNotBefore(now, false) == false {
|
||||
vErr.Inner = errors.New("Token is not valid yet")
|
||||
vErr.Errors |= ValidationErrorNotValidYet
|
||||
}
|
||||
|
||||
if vErr.valid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return vErr
|
||||
}
|
|
@ -0,0 +1,52 @@
|
|||
package jwt
|
||||
|
||||
// Implements the none signing method. This is required by the spec
|
||||
// but you probably should never use it.
|
||||
var SigningMethodNone *signingMethodNone
|
||||
|
||||
const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
|
||||
|
||||
var NoneSignatureTypeDisallowedError error
|
||||
|
||||
type signingMethodNone struct{}
|
||||
type unsafeNoneMagicConstant string
|
||||
|
||||
func init() {
|
||||
SigningMethodNone = &signingMethodNone{}
|
||||
NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid)
|
||||
|
||||
RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
|
||||
return SigningMethodNone
|
||||
})
|
||||
}
|
||||
|
||||
func (m *signingMethodNone) Alg() string {
|
||||
return "none"
|
||||
}
|
||||
|
||||
// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
|
||||
func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) {
|
||||
// Key must be UnsafeAllowNoneSignatureType to prevent accidentally
|
||||
// accepting 'none' signing method
|
||||
if _, ok := key.(unsafeNoneMagicConstant); !ok {
|
||||
return NoneSignatureTypeDisallowedError
|
||||
}
|
||||
// If signing method is none, signature must be an empty string
|
||||
if signature != "" {
|
||||
return NewValidationError(
|
||||
"'none' signing method with non-empty signature",
|
||||
ValidationErrorSignatureInvalid,
|
||||
)
|
||||
}
|
||||
|
||||
// Accept 'none' signing method.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
|
||||
func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) {
|
||||
if _, ok := key.(unsafeNoneMagicConstant); ok {
|
||||
return "", nil
|
||||
}
|
||||
return "", NoneSignatureTypeDisallowedError
|
||||
}
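A brief sketch of the opt-in guard this file implements: parsing an unsigned token only succeeds when `UnsafeAllowNoneSignatureType` is explicitly supplied as the key.

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// An alg:none token has an empty third segment.
	unsigned, _ := jwt.New(jwt.SigningMethodNone).SignedString(jwt.UnsafeAllowNoneSignatureType)

	// Without the magic constant the parse is rejected...
	_, err := jwt.Parse(unsigned, func(t *jwt.Token) (interface{}, error) {
		return []byte("whatever"), nil
	})
	fmt.Println(err) // 'none' signature type is not allowed

	// ...and only opting in via UnsafeAllowNoneSignatureType accepts it.
	token, err := jwt.Parse(unsigned, func(t *jwt.Token) (interface{}, error) {
		return jwt.UnsafeAllowNoneSignatureType, nil
	})
	fmt.Println(token.Valid, err) // true <nil>
}
```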
|
|
@ -0,0 +1,131 @@
|
|||
package jwt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Parser struct {
|
||||
ValidMethods []string // If populated, only these methods will be considered valid
|
||||
UseJSONNumber bool // Use JSON Number format in JSON decoder
|
||||
SkipClaimsValidation bool // Skip claims validation during token parsing
|
||||
}
|
||||
|
||||
// Parse, validate, and return a token.
|
||||
// keyFunc will receive the parsed token and should return the key for validating.
|
||||
// If everything is kosher, err will be nil
|
||||
func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
|
||||
return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
|
||||
}
|
||||
|
||||
func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
|
||||
parts := strings.Split(tokenString, ".")
|
||||
if len(parts) != 3 {
|
||||
return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
|
||||
}
|
||||
|
||||
var err error
|
||||
token := &Token{Raw: tokenString}
|
||||
|
||||
// parse Header
|
||||
var headerBytes []byte
|
||||
if headerBytes, err = DecodeSegment(parts[0]); err != nil {
|
||||
if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
|
||||
return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
|
||||
}
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
|
||||
// parse Claims
|
||||
var claimBytes []byte
|
||||
token.Claims = claims
|
||||
|
||||
if claimBytes, err = DecodeSegment(parts[1]); err != nil {
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
|
||||
if p.UseJSONNumber {
|
||||
dec.UseNumber()
|
||||
}
|
||||
// JSON Decode. Special case for map type to avoid weird pointer behavior
|
||||
if c, ok := token.Claims.(MapClaims); ok {
|
||||
err = dec.Decode(&c)
|
||||
} else {
|
||||
err = dec.Decode(&claims)
|
||||
}
|
||||
// Handle decode error
|
||||
if err != nil {
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
|
||||
// Lookup signature method
|
||||
if method, ok := token.Header["alg"].(string); ok {
|
||||
if token.Method = GetSigningMethod(method); token.Method == nil {
|
||||
return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
|
||||
}
|
||||
} else {
|
||||
return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
|
||||
}
|
||||
|
||||
// Verify signing method is in the required set
|
||||
if p.ValidMethods != nil {
|
||||
var signingMethodValid = false
|
||||
var alg = token.Method.Alg()
|
||||
for _, m := range p.ValidMethods {
|
||||
if m == alg {
|
||||
signingMethodValid = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !signingMethodValid {
|
||||
// signing method is not in the listed set
|
||||
return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
|
||||
}
|
||||
}
|
||||
|
||||
// Lookup key
|
||||
var key interface{}
|
||||
if keyFunc == nil {
|
||||
// keyFunc was not provided. short circuiting validation
|
||||
return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
|
||||
}
|
||||
if key, err = keyFunc(token); err != nil {
|
||||
// keyFunc returned an error
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
|
||||
}
|
||||
|
||||
vErr := &ValidationError{}
|
||||
|
||||
// Validate Claims
|
||||
if !p.SkipClaimsValidation {
|
||||
if err := token.Claims.Valid(); err != nil {
|
||||
|
||||
// If the Claims Valid returned an error, check if it is a validation error,
|
||||
// If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
|
||||
if e, ok := err.(*ValidationError); !ok {
|
||||
vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
|
||||
} else {
|
||||
vErr = e
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Perform validation
|
||||
token.Signature = parts[2]
|
||||
if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
|
||||
vErr.Inner = err
|
||||
vErr.Errors |= ValidationErrorSignatureInvalid
|
||||
}
|
||||
|
||||
if vErr.valid() {
|
||||
token.Valid = true
|
||||
return token, nil
|
||||
}
|
||||
|
||||
return token, vErr
|
||||
}
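A small sketch of configuring the `Parser` directly; the secret is a placeholder, and the point is that `ValidMethods` rejects an otherwise well-signed token whose `alg` is not in the allowed set:

```go
package main

import (
	"fmt"
	"log"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	secret := []byte("placeholder-secret") // illustrative key only

	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS512, jwt.MapClaims{"sub": "demo"}).
		SignedString(secret)
	if err != nil {
		log.Fatal(err)
	}

	// A parser pinned to HS256 rejects the HS512 token even though the key matches.
	parser := &jwt.Parser{ValidMethods: []string{"HS256"}, UseJSONNumber: true}
	_, err = parser.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return secret, nil
	})
	fmt.Println(err) // signing method HS512 is invalid
}
```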
|
|
@ -0,0 +1,100 @@
|
|||
package jwt
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
)
|
||||
|
||||
// Implements the RSA family of signing methods
|
||||
type SigningMethodRSA struct {
|
||||
Name string
|
||||
Hash crypto.Hash
|
||||
}
|
||||
|
||||
// Specific instances for RS256 and company
|
||||
var (
|
||||
SigningMethodRS256 *SigningMethodRSA
|
||||
SigningMethodRS384 *SigningMethodRSA
|
||||
SigningMethodRS512 *SigningMethodRSA
|
||||
)
|
||||
|
||||
func init() {
|
||||
// RS256
|
||||
SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
|
||||
RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
|
||||
return SigningMethodRS256
|
||||
})
|
||||
|
||||
// RS384
|
||||
SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
|
||||
RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
|
||||
return SigningMethodRS384
|
||||
})
|
||||
|
||||
// RS512
|
||||
SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
|
||||
RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
|
||||
return SigningMethodRS512
|
||||
})
|
||||
}
|
||||
|
||||
func (m *SigningMethodRSA) Alg() string {
|
||||
return m.Name
|
||||
}
|
||||
|
||||
// Implements the Verify method from SigningMethod
|
||||
// For this verify method, key must be an rsa.PublicKey structure.
|
||||
func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
|
||||
var err error
|
||||
|
||||
// Decode the signature
|
||||
var sig []byte
|
||||
if sig, err = DecodeSegment(signature); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var rsaKey *rsa.PublicKey
|
||||
var ok bool
|
||||
|
||||
if rsaKey, ok = key.(*rsa.PublicKey); !ok {
|
||||
return ErrInvalidKeyType
|
||||
}
|
||||
|
||||
// Create hasher
|
||||
if !m.Hash.Available() {
|
||||
return ErrHashUnavailable
|
||||
}
|
||||
hasher := m.Hash.New()
|
||||
hasher.Write([]byte(signingString))
|
||||
|
||||
// Verify the signature
|
||||
return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
|
||||
}
|
||||
|
||||
// Implements the Sign method from SigningMethod
|
||||
// For this signing method, key must be an rsa.PrivateKey structure.
|
||||
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
|
||||
var rsaKey *rsa.PrivateKey
|
||||
var ok bool
|
||||
|
||||
// Validate type of key
|
||||
if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
|
||||
return "", ErrInvalidKey
|
||||
}
|
||||
|
||||
// Create the hasher
|
||||
if !m.Hash.Available() {
|
||||
return "", ErrHashUnavailable
|
||||
}
|
||||
|
||||
hasher := m.Hash.New()
|
||||
hasher.Write([]byte(signingString))
|
||||
|
||||
// Sign the string and return the encoded bytes
|
||||
if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
|
||||
return EncodeSegment(sigBytes), nil
|
||||
} else {
|
||||
return "", err
|
||||
}
|
||||
}
|
|
@ -0,0 +1,126 @@
|
|||
// +build go1.4
|
||||
|
||||
package jwt
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
)
|
||||
|
||||
// Implements the RSAPSS family of signing methods
|
||||
type SigningMethodRSAPSS struct {
|
||||
*SigningMethodRSA
|
||||
Options *rsa.PSSOptions
|
||||
}
|
||||
|
||||
// Specific instances for RS/PS and company
|
||||
var (
|
||||
SigningMethodPS256 *SigningMethodRSAPSS
|
||||
SigningMethodPS384 *SigningMethodRSAPSS
|
||||
SigningMethodPS512 *SigningMethodRSAPSS
|
||||
)
|
||||
|
||||
func init() {
|
||||
// PS256
|
||||
SigningMethodPS256 = &SigningMethodRSAPSS{
|
||||
&SigningMethodRSA{
|
||||
Name: "PS256",
|
||||
Hash: crypto.SHA256,
|
||||
},
|
||||
&rsa.PSSOptions{
|
||||
SaltLength: rsa.PSSSaltLengthAuto,
|
||||
Hash: crypto.SHA256,
|
||||
},
|
||||
}
|
||||
RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
|
||||
return SigningMethodPS256
|
||||
})
|
||||
|
||||
// PS384
|
||||
SigningMethodPS384 = &SigningMethodRSAPSS{
|
||||
&SigningMethodRSA{
|
||||
Name: "PS384",
|
||||
Hash: crypto.SHA384,
|
||||
},
|
||||
&rsa.PSSOptions{
|
||||
SaltLength: rsa.PSSSaltLengthAuto,
|
||||
Hash: crypto.SHA384,
|
||||
},
|
||||
}
|
||||
RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
|
||||
return SigningMethodPS384
|
||||
})
|
||||
|
||||
// PS512
|
||||
SigningMethodPS512 = &SigningMethodRSAPSS{
|
||||
&SigningMethodRSA{
|
||||
Name: "PS512",
|
||||
Hash: crypto.SHA512,
|
||||
},
|
||||
&rsa.PSSOptions{
|
||||
SaltLength: rsa.PSSSaltLengthAuto,
|
||||
Hash: crypto.SHA512,
|
||||
},
|
||||
}
|
||||
RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
|
||||
return SigningMethodPS512
|
||||
})
|
||||
}
|
||||
|
||||
// Implements the Verify method from SigningMethod
|
||||
// For this verify method, key must be an rsa.PublicKey struct
|
||||
func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
|
||||
var err error
|
||||
|
||||
// Decode the signature
|
||||
var sig []byte
|
||||
if sig, err = DecodeSegment(signature); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var rsaKey *rsa.PublicKey
|
||||
switch k := key.(type) {
|
||||
case *rsa.PublicKey:
|
||||
rsaKey = k
|
||||
default:
|
||||
return ErrInvalidKey
|
||||
}
|
||||
|
||||
// Create hasher
|
||||
if !m.Hash.Available() {
|
||||
return ErrHashUnavailable
|
||||
}
|
||||
hasher := m.Hash.New()
|
||||
hasher.Write([]byte(signingString))
|
||||
|
||||
return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options)
|
||||
}
|
||||
|
||||
// Implements the Sign method from SigningMethod
|
||||
// For this signing method, key must be an rsa.PrivateKey struct
|
||||
func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
|
||||
var rsaKey *rsa.PrivateKey
|
||||
|
||||
switch k := key.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
rsaKey = k
|
||||
default:
|
||||
return "", ErrInvalidKeyType
|
||||
}
|
||||
|
||||
// Create the hasher
|
||||
if !m.Hash.Available() {
|
||||
return "", ErrHashUnavailable
|
||||
}
|
||||
|
||||
hasher := m.Hash.New()
|
||||
hasher.Write([]byte(signingString))
|
||||
|
||||
// Sign the string and return the encoded bytes
|
||||
if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
|
||||
return EncodeSegment(sigBytes), nil
|
||||
} else {
|
||||
return "", err
|
||||
}
|
||||
}
|
|
@ -0,0 +1,69 @@
|
|||
package jwt
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key")
|
||||
ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key")
|
||||
ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key")
|
||||
)
|
||||
|
||||
// Parse PEM encoded PKCS1 or PKCS8 private key
|
||||
func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
|
||||
var err error
|
||||
|
||||
// Parse PEM block
|
||||
var block *pem.Block
|
||||
if block, _ = pem.Decode(key); block == nil {
|
||||
return nil, ErrKeyMustBePEMEncoded
|
||||
}
|
||||
|
||||
var parsedKey interface{}
|
||||
if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
|
||||
if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var pkey *rsa.PrivateKey
|
||||
var ok bool
|
||||
if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
|
||||
return nil, ErrNotRSAPrivateKey
|
||||
}
|
||||
|
||||
return pkey, nil
|
||||
}
|
||||
|
||||
// Parse PEM encoded PKCS1 or PKCS8 public key
|
||||
func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
|
||||
var err error
|
||||
|
||||
// Parse PEM block
|
||||
var block *pem.Block
|
||||
if block, _ = pem.Decode(key); block == nil {
|
||||
return nil, ErrKeyMustBePEMEncoded
|
||||
}
|
||||
|
||||
// Parse the key
|
||||
var parsedKey interface{}
|
||||
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
|
||||
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
|
||||
parsedKey = cert.PublicKey
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var pkey *rsa.PublicKey
|
||||
var ok bool
|
||||
if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
|
||||
return nil, ErrNotRSAPublicKey
|
||||
}
|
||||
|
||||
return pkey, nil
|
||||
}
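A sketch tying these PEM helpers to the RS256 methods above; the key is generated and PEM-encoded in memory rather than loaded from a real file:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// Stand-in for key material that would normally live on disk.
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	pemBytes := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(rsaKey),
	})

	// Round-trip through the helper, then sign and verify an RS256 token.
	priv, err := jwt.ParseRSAPrivateKeyFromPEM(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	signed, err := jwt.New(jwt.SigningMethodRS256).SignedString(priv)
	if err != nil {
		log.Fatal(err)
	}
	token, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return &priv.PublicKey, nil
	})
	fmt.Println(token.Valid, err) // true <nil>
}
```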
|
|
@ -0,0 +1,35 @@
|
|||
package jwt
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
var signingMethods = map[string]func() SigningMethod{}
|
||||
var signingMethodLock = new(sync.RWMutex)
|
||||
|
||||
// Implement SigningMethod to add new methods for signing or verifying tokens.
|
||||
type SigningMethod interface {
|
||||
Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid
|
||||
Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error
|
||||
Alg() string // returns the alg identifier for this method (example: 'HS256')
|
||||
}
|
||||
|
||||
// Register the "alg" name and a factory function for signing method.
|
||||
// This is typically done during init() in the method's implementation
|
||||
func RegisterSigningMethod(alg string, f func() SigningMethod) {
|
||||
signingMethodLock.Lock()
|
||||
defer signingMethodLock.Unlock()
|
||||
|
||||
signingMethods[alg] = f
|
||||
}
|
||||
|
||||
// Get a signing method from an "alg" string
|
||||
func GetSigningMethod(alg string) (method SigningMethod) {
|
||||
signingMethodLock.RLock()
|
||||
defer signingMethodLock.RUnlock()
|
||||
|
||||
if methodF, ok := signingMethods[alg]; ok {
|
||||
method = methodF()
|
||||
}
|
||||
return
|
||||
}
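For completeness, a minimal sketch of the extension point this registry provides; the `DEBUG` method below is purely illustrative (it signs nothing) and is not part of this commit or of the library itself.

```go
package jwtext

import (
	jwt "github.com/dgrijalva/jwt-go"
)

// debugMethod is a purely illustrative SigningMethod that produces no signature.
// Nothing like this should be used outside of tests.
type debugMethod struct{}

func (m *debugMethod) Alg() string { return "DEBUG" }

func (m *debugMethod) Sign(signingString string, key interface{}) (string, error) {
	return "", nil
}

func (m *debugMethod) Verify(signingString, signature string, key interface{}) error {
	if signature != "" {
		return jwt.ErrSignatureInvalid
	}
	return nil
}

func init() {
	// Registration is what lets the parser resolve `"alg": "DEBUG"` headers.
	jwt.RegisterSigningMethod("DEBUG", func() jwt.SigningMethod {
		return &debugMethod{}
	})
}
```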
|
|
@ -0,0 +1,108 @@
|
|||
package jwt
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
|
||||
// You can override it to use another time value. This is useful for testing or if your
|
||||
// server uses a different time zone than your tokens.
|
||||
var TimeFunc = time.Now
|
||||
|
||||
// Parse methods use this callback function to supply
|
||||
// the key for verification. The function receives the parsed,
|
||||
// but unverified Token. This allows you to use properties in the
|
||||
// Header of the token (such as `kid`) to identify which key to use.
|
||||
type Keyfunc func(*Token) (interface{}, error)
|
||||
|
||||
// A JWT Token. Different fields will be used depending on whether you're
|
||||
// creating or parsing/verifying a token.
|
||||
type Token struct {
|
||||
Raw string // The raw token. Populated when you Parse a token
|
||||
Method SigningMethod // The signing method used or to be used
|
||||
Header map[string]interface{} // The first segment of the token
|
||||
Claims Claims // The second segment of the token
|
||||
Signature string // The third segment of the token. Populated when you Parse a token
|
||||
Valid bool // Is the token valid? Populated when you Parse/Verify a token
|
||||
}
|
||||
|
||||
// Create a new Token. Takes a signing method
|
||||
func New(method SigningMethod) *Token {
|
||||
return NewWithClaims(method, MapClaims{})
|
||||
}
|
||||
|
||||
func NewWithClaims(method SigningMethod, claims Claims) *Token {
|
||||
return &Token{
|
||||
Header: map[string]interface{}{
|
||||
"typ": "JWT",
|
||||
"alg": method.Alg(),
|
||||
},
|
||||
Claims: claims,
|
||||
Method: method,
|
||||
}
|
||||
}
|
||||
|
||||
// Get the complete, signed token
|
||||
func (t *Token) SignedString(key interface{}) (string, error) {
|
||||
var sig, sstr string
|
||||
var err error
|
||||
if sstr, err = t.SigningString(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if sig, err = t.Method.Sign(sstr, key); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.Join([]string{sstr, sig}, "."), nil
|
||||
}
|
||||
|
||||
// Generate the signing string. This is the
|
||||
// most expensive part of the whole deal. Unless you
|
||||
// need this for something special, just go straight for
|
||||
// the SignedString.
|
||||
func (t *Token) SigningString() (string, error) {
|
||||
var err error
|
||||
parts := make([]string, 2)
|
||||
for i, _ := range parts {
|
||||
var jsonValue []byte
|
||||
if i == 0 {
|
||||
if jsonValue, err = json.Marshal(t.Header); err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
if jsonValue, err = json.Marshal(t.Claims); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
parts[i] = EncodeSegment(jsonValue)
|
||||
}
|
||||
return strings.Join(parts, "."), nil
|
||||
}
|
||||
|
||||
// Parse, validate, and return a token.
|
||||
// keyFunc will receive the parsed token and should return the key for validating.
|
||||
// If everything is kosher, err will be nil
|
||||
func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
|
||||
return new(Parser).Parse(tokenString, keyFunc)
|
||||
}
|
||||
|
||||
func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
|
||||
return new(Parser).ParseWithClaims(tokenString, claims, keyFunc)
|
||||
}
|
||||
|
||||
// Encode JWT specific base64url encoding with padding stripped
|
||||
func EncodeSegment(seg []byte) string {
|
||||
return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
|
||||
}
|
||||
|
||||
// Decode JWT specific base64url encoding with padding stripped
|
||||
func DecodeSegment(seg string) ([]byte, error) {
|
||||
if l := len(seg) % 4; l > 0 {
|
||||
seg += strings.Repeat("=", 4-l)
|
||||
}
|
||||
|
||||
return base64.URLEncoding.DecodeString(seg)
|
||||
}
|
|
@ -21,12 +21,28 @@
|
|||
"revisionTime": "2016-08-11T21:22:31Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "M30X+Wqn7AnUr1numUOkQRI7ET0=",
|
||||
"path": "github.com/Azure/azure-sdk-for-go/storage",
|
||||
"revision": "e01c89c9c29b97413ae6ebe89a294d814ec7ca8b",
|
||||
"revisionTime": "2016-10-28T18:35:05Z",
|
||||
"version": "v6",
|
||||
"versionExact": "v6.0.0-beta"
|
||||
"checksumSHA1": "24EW3PGLGVSy/5joAOZdHzN/S2E=",
|
||||
"path": "github.com/Azure/azure-storage-go",
|
||||
"revision": "12ccaadb081cdd217702067d28da9a7ff4305239",
|
||||
"revisionTime": "2017-03-02T22:15:08Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "+sxS3YEvxf1VxlRskIdXFRWq9rY=",
|
||||
"path": "github.com/Azure/go-autorest/autorest",
|
||||
"revision": "ec5f4903f77ed9927ac95b19ab8e44ada64c1356",
|
||||
"revisionTime": "2017-02-15T19:58:14Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "ghrnc4vZv6q8zzeakZnrS8CGFhE=",
|
||||
"path": "github.com/Azure/go-autorest/autorest/azure",
|
||||
"revision": "ec5f4903f77ed9927ac95b19ab8e44ada64c1356",
|
||||
"revisionTime": "2017-02-15T19:58:14Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "q9Qz8PAxK5FTOZwgYKe5Lj38u4c=",
|
||||
"path": "github.com/Azure/go-autorest/autorest/date",
|
||||
"revision": "ec5f4903f77ed9927ac95b19ab8e44ada64c1356",
|
||||
"revisionTime": "2017-02-15T19:58:14Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "USkefO0g1U9mr+8hagv3fpSkrxg=",
|
||||
|
@ -52,6 +68,12 @@
|
|||
"revision": "346938d642f2ec3594ed81d874461961cd0faa76",
|
||||
"revisionTime": "2016-10-29T20:57:26Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "2Fy1Y6Z3lRRX1891WF/+HT4XS2I=",
|
||||
"path": "github.com/dgrijalva/jwt-go",
|
||||
"revision": "2268707a8f0843315e2004ee4f1d021dc08baedf",
|
||||
"revisionTime": "2017-02-01T22:58:49Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "zYnPsNAVm1/ViwCkN++dX2JQhBo=",
|
||||
"path": "github.com/edsrzf/mmap-go",
|
||||
|
|