fix(region): vendor update (#23867)

This commit is contained in:
屈轩
2025-11-30 17:20:38 +08:00
committed by GitHub
parent f95c341f44
commit 36612bec31
93 changed files with 1030 additions and 9060 deletions

7
go.mod
View File

@@ -98,7 +98,7 @@ require (
k8s.io/cri-api v0.28.15
k8s.io/klog/v2 v2.20.0
moul.io/http2curl/v2 v2.3.0
yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20251127081629-9d8a6d1fe822
yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20251130090640-988b91734be4
yunion.io/x/executor v0.0.0-20250518005516-5402e9e0bed0
yunion.io/x/jsonutils v1.0.1-0.20250507052344-1abcf4f443b1
yunion.io/x/log v1.0.1-0.20240305175729-7cf2d6cd5a91
@@ -116,9 +116,7 @@ require (
cloud.google.com/go/iam v1.1.6 // indirect
cloud.google.com/go/storage v1.39.1 // indirect
gitee.com/chunanyong/dm v1.8.14 // indirect
github.com/Azure/azure-sdk-for-go v36.1.0+incompatible // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.9.6 // indirect
github.com/Azure/go-autorest/autorest/adal v0.8.2 // indirect
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 // indirect
@@ -129,7 +127,6 @@ require (
github.com/ClickHouse/clickhouse-go v1.5.4 // indirect
github.com/DataDog/dd-trace-go v0.6.1 // indirect
github.com/DataDog/zstd v1.3.4 // indirect
github.com/Microsoft/azure-vhd-utils v0.0.0-20181115010904-44cbada2ece3 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Microsoft/hcsshim v0.8.14 // indirect
github.com/RoaringBitmap/roaring v1.2.3 // indirect
@@ -397,6 +394,4 @@ require (
replace github.com/influxdata/promql/v2 => github.com/zexi/promql/v2 v2.12.1
replace github.com/Azure/azure-sdk-for-go => github.com/Azure/azure-sdk-for-go v36.1.0+incompatible
replace github.com/docker/docker => github.com/docker/docker v20.10.27+incompatible

15
go.sum
View File

@@ -52,13 +52,9 @@ gitee.com/chunanyong/dm v1.8.14 h1:1S9+aD0fY/HXkcm8dKh2HjYsDlcdXNG+4IOX9JtrSjA=
gitee.com/chunanyong/dm v1.8.14/go.mod h1:EPRJnuPFgbyOFgJ0TRYCTGzhq+ZT4wdyaj/GW/LLcNg=
github.com/360EntSecGroup-Skylar/excelize v1.4.0 h1:43rak9uafmwSJpXfFO1heKQph8tP3nlfWJWFQQtW1R0=
github.com/360EntSecGroup-Skylar/excelize v1.4.0/go.mod h1:R8KYLmGns0vDPe6/HyphW0mzW+MFexlGDafU0ykVEnU=
github.com/Azure/azure-sdk-for-go v36.1.0+incompatible h1:smHlbChr/JDmsyUqELZXLs0YIgpXecIGdUibuc2983s=
github.com/Azure/azure-sdk-for-go v36.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
github.com/Azure/go-autorest/autorest v0.9.6 h1:5YWtOnckcudzIw8lPPBcWOnmIFWMtHci1ZWAZulMSx0=
@@ -79,8 +75,6 @@ github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxB
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
@@ -95,8 +89,6 @@ github.com/DataDog/zstd v1.3.4 h1:LAGHkXuvC6yky+C2CUG2tD7w8QlrUwpue8XwIh0X4AY=
github.com/DataDog/zstd v1.3.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/LeeEirc/terminalparser v0.0.0-20240205084113-fbf78c8480f2 h1:XGB3B0651J1uKOE1KJa1gsrV/DO1kthhk2NTDUHATgs=
github.com/LeeEirc/terminalparser v0.0.0-20240205084113-fbf78c8480f2/go.mod h1:tiLv6VBLH4Z3KdBSe2qIKRwQDGCVQ9/F5fOKpQGvyoA=
github.com/Microsoft/azure-vhd-utils v0.0.0-20181115010904-44cbada2ece3 h1:gImoAO1xAcC1oDlYmD/X7dggsodGf2DFJOVE5m0ssms=
github.com/Microsoft/azure-vhd-utils v0.0.0-20181115010904-44cbada2ece3/go.mod h1:u0H9gMieFLxkUy8RS0X8VbFWyPs2815qQAaitRbj6x0=
github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
@@ -351,8 +343,6 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumC
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11 h1:m8nX8hsUghn853BJ5qB0lX+VvS6LTJPksWyILFZRYN4=
github.com/dnstap/golang-dnstap v0.0.0-20170829151710-2cf77a2b5e11/go.mod h1:s1PfVYYVmTMgCSPtho4LKBDecEHJWtiVDPNv78Z985U=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible h1:dvc1KSkIYTVjZgHf/CTC2diTYC8PzhaA5sFISRfNVrE=
@@ -1606,7 +1596,6 @@ gopkg.in/mail.v2 v2.3.1 h1:WYFn/oANrAGP2C0dcV6/pbkPzv8yGzqTjPmTeO7qoXk=
gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.19.1/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -1672,8 +1661,8 @@ sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20251127081629-9d8a6d1fe822 h1:RV4teBoWPnlzeZfSMDqm2fWR+aysnv5QweXvYlRWXm4=
yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20251127081629-9d8a6d1fe822/go.mod h1:JlmQ8iuGdaAlUkivFVcUAJgi6QYr5jz1vKv7pQ5m7p8=
yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20251130090640-988b91734be4 h1:FsVs00YbXgtIAkG5vKlsuv3SJXFLQpyqRvRlaY4sc4I=
yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20251130090640-988b91734be4/go.mod h1:aWRX5Phwz3nbHUNnIAm1oVogjguXPYDDgCOy/9Hnnvk=
yunion.io/x/executor v0.0.0-20250518005516-5402e9e0bed0 h1:msG4SiDSVU7CrXH06WuHlNEZXIooTcmNbfrIGHuIHBU=
yunion.io/x/executor v0.0.0-20250518005516-5402e9e0bed0/go.mod h1:Uxuou9WQIeJXNpy7t2fPLL0BYLvLiMvGQwY7Qc6aSws=
yunion.io/x/jsonutils v0.0.0-20190625054549-a964e1e8a051/go.mod h1:4N0/RVzsYL3kH3WE/H1BjUQdFiWu50JGCFQuuy+Z634=

View File

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016 Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,5 +0,0 @@
Microsoft Azure-SDK-for-Go
Copyright 2014-2017 Microsoft
This product includes software developed at
the Microsoft Corporation (https://www.microsoft.com).

View File

@@ -1,22 +0,0 @@
# Azure Storage SDK for Go (Preview)
:exclamation: IMPORTANT: This package is in maintenance only and will be deprecated in the
future. Please use one of the following packages instead.
| Service | Import Path/Repo |
|---------|------------------|
| Storage - Blobs | [github.com/Azure/azure-storage-blob-go](https://github.com/Azure/azure-storage-blob-go) |
| Storage - Files | [github.com/Azure/azure-storage-file-go](https://github.com/Azure/azure-storage-file-go) |
| Storage - Queues | [github.com/Azure/azure-storage-queue-go](https://github.com/Azure/azure-storage-queue-go) |
The `github.com/Azure/azure-sdk-for-go/storage` package is used to manage
[Azure Storage](https://docs.microsoft.com/en-us/azure/storage/) data plane
resources: containers, blobs, tables, and queues.
To manage storage *accounts* use Azure Resource Manager (ARM) via the packages
at [github.com/Azure/azure-sdk-for-go/services/storage](https://github.com/Azure/azure-sdk-for-go/tree/master/services/storage).
This package also supports the [Azure Storage
Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/)
(Windows only).

View File

@@ -1,91 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"crypto/md5"
"encoding/base64"
"fmt"
"net/http"
"net/url"
"time"
)
// PutAppendBlob initializes an empty append blob with specified name. An
// append blob must be created using this method before appending blocks.
//
// See CreateBlockBlobFromReader for more info on creating blobs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) PutAppendBlob(options *PutBlobOptions) error {
	client := b.Container.bsc.client

	// Standard headers plus the append-blob marker, the blob's own
	// properties, and its metadata.
	hdrs := client.getStandardHeaders()
	hdrs["x-ms-blob-type"] = string(BlobTypeAppend)
	hdrs = mergeHeaders(hdrs, headersFromStruct(b.Properties))
	hdrs = client.addMetadataToHeaders(hdrs, b.Metadata)

	query := url.Values{}
	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}

	uri := client.getEndpoint(blobServiceName, b.buildPath(), query)
	resp, err := client.exec(http.MethodPut, uri, hdrs, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	return b.respondCreation(resp, BlobTypeAppend)
}
// AppendBlockOptions includes the options for an append block operation
//
// Fields carrying a `header:"..."` tag are copied into the request headers
// via headersFromStruct; the untagged fields are handled explicitly by
// AppendBlock.
type AppendBlockOptions struct {
	// Timeout is forwarded to addTimeout when building the request query.
	Timeout uint
	LeaseID string `header:"x-ms-lease-id"`
	// MaxSize and AppendPosition map to the append-blob conditional headers.
	MaxSize           *uint      `header:"x-ms-blob-condition-maxsize"`
	AppendPosition    *uint      `header:"x-ms-blob-condition-appendpos"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
	// ContentMD5, when true, makes AppendBlock compute the chunk's MD5
	// checksum and send it as the Content-MD5 header; it is deliberately
	// untagged because it is not itself a header value.
	ContentMD5 bool
}
// AppendBlock appends a block to an append blob.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Append-Block
func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error {
	client := b.Container.bsc.client

	query := url.Values{"comp": {"appendblock"}}
	hdrs := client.getStandardHeaders()
	hdrs["x-ms-blob-type"] = string(BlobTypeAppend)
	hdrs["Content-Length"] = fmt.Sprintf("%v", len(chunk))

	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
		if options.ContentMD5 {
			// The service verifies the chunk against this checksum.
			sum := md5.Sum(chunk)
			hdrs[headerContentMD5] = base64.StdEncoding.EncodeToString(sum[:])
		}
	}

	uri := client.getEndpoint(blobServiceName, b.buildPath(), query)
	resp, err := client.exec(http.MethodPut, uri, hdrs, bytes.NewReader(chunk), b.Container.bsc.auth)
	if err != nil {
		return err
	}
	return b.respondCreation(resp, BlobTypeAppend)
}

View File

@@ -1,246 +0,0 @@
// Package storage provides clients for Microsoft Azure Storage Services.
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"fmt"
"net/url"
"sort"
"strings"
)
// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/authentication-for-the-azure-storage-services

// authentication selects which shared-key signing scheme is used when
// building and signing the canonicalized request string.
type authentication string

const (
	// The four shared-key variants: blob/queue vs. table service, each in
	// full and "Lite" form (the Lite forms sign fewer request properties).
	sharedKey             authentication = "sharedKey"
	sharedKeyForTable     authentication = "sharedKeyTable"
	sharedKeyLite         authentication = "sharedKeyLite"
	sharedKeyLiteForTable authentication = "sharedKeyLiteTable"

	// headers
	headerAcceptCharset           = "Accept-Charset"
	headerAuthorization           = "Authorization"
	headerContentLength           = "Content-Length"
	headerDate                    = "Date"
	headerXmsDate                 = "x-ms-date"
	headerXmsVersion              = "x-ms-version"
	headerContentEncoding         = "Content-Encoding"
	headerContentLanguage         = "Content-Language"
	headerContentType             = "Content-Type"
	headerContentMD5              = "Content-MD5"
	headerIfModifiedSince         = "If-Modified-Since"
	headerIfMatch                 = "If-Match"
	headerIfNoneMatch             = "If-None-Match"
	headerIfUnmodifiedSince       = "If-Unmodified-Since"
	headerRange                   = "Range"
	headerDataServiceVersion      = "DataServiceVersion"
	headerMaxDataServiceVersion   = "MaxDataServiceVersion"
	headerContentTransferEncoding = "Content-Transfer-Encoding"
)
// addAuthorizationHeader signs the request and stores the result in the
// Authorization header. SAS clients carry their credentials in the query
// string, so for them the headers are returned untouched.
func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) {
	if c.sasClient {
		return headers, nil
	}
	authHeader, err := c.getSharedKey(verb, url, headers, auth)
	if err != nil {
		return nil, err
	}
	headers[headerAuthorization] = authHeader
	return headers, nil
}
// getSharedKey builds the canonicalized resource and string-to-sign for the
// request, then returns the formatted Authorization header value.
func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) {
	canonicalizedResource, err := c.buildCanonicalizedResource(url, auth, false)
	if err != nil {
		return "", err
	}
	stringToSign, err := buildCanonicalizedString(verb, headers, canonicalizedResource, auth)
	if err != nil {
		return "", err
	}
	return c.createAuthorizationHeader(stringToSign, auth), nil
}
// buildCanonicalizedResource builds the CanonicalizedResource part of the
// string-to-sign from the request URI.
//
// The resource starts with "/<account>" (skipped for the storage emulator
// account when sas is true), followed by the escaped URI path. For sharedKey
// authentication every query parameter is then appended on its own line with
// sorted keys and comma-joined, sorted values; for the other schemes only a
// "comp" parameter is kept, in "?comp=<value>" form.
func (c *Client) buildCanonicalizedResource(uri string, auth authentication, sas bool) (string, error) {
	errMsg := "buildCanonicalizedResource error: %s"
	u, err := url.Parse(uri)
	if err != nil {
		return "", fmt.Errorf(errMsg, err.Error())
	}
	cr := bytes.NewBufferString("")
	if c.accountName != StorageEmulatorAccountName || !sas {
		cr.WriteString("/")
		cr.WriteString(c.getCanonicalizedAccountName())
	}
	if len(u.Path) > 0 {
		// Any portion of the CanonicalizedResource string that is derived from
		// the resource's URI should be encoded exactly as it is in the URI.
		// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
		cr.WriteString(u.EscapedPath())
	}
	params, err := url.ParseQuery(u.RawQuery)
	if err != nil {
		return "", fmt.Errorf(errMsg, err.Error())
	}
	// See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277
	if auth == sharedKey {
		if len(params) > 0 {
			cr.WriteString("\n")
			keys := make([]string, 0, len(params))
			for key := range params {
				keys = append(keys, key)
			}
			sort.Strings(keys)
			completeParams := make([]string, 0, len(keys))
			for _, key := range keys {
				if len(params[key]) > 1 {
					sort.Strings(params[key])
				}
				completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")))
			}
			cr.WriteString(strings.Join(completeParams, "\n"))
		}
	} else {
		// search for "comp" parameter, if exists then add it to canonicalizedresource
		if v, ok := params["comp"]; ok {
			cr.WriteString("?comp=" + v[0])
		}
	}
	// cr.String() avoids the []byte -> string round trip that
	// string(cr.Bytes()) performed.
	return cr.String(), nil
}
// getCanonicalizedAccountName returns the account name to use when signing
// requests: the configured account name with any "-secondary" suffix removed.
func (c *Client) getCanonicalizedAccountName() string {
	// since we may be trying to access a secondary storage account, we need to
	// remove the -secondary part of the storage name
	return strings.TrimSuffix(c.accountName, "-secondary")
}
// buildCanonicalizedString assembles the string-to-sign for the given
// authentication scheme. Each scheme joins a different, fixed set of request
// properties with newlines, always ending with the canonicalized resource.
// Unknown schemes yield an error.
func buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string, auth authentication) (string, error) {
	// A literal "0" Content-Length is signed as the empty string.
	contentLength := headers[headerContentLength]
	if contentLength == "0" {
		contentLength = ""
	}
	// x-ms-date takes precedence over Date; for the sharedKey and
	// sharedKeyLite schemes its presence blanks the Date line instead
	// (the value is picked up from the canonicalized x-ms-* headers).
	date := headers[headerDate]
	if xmsDate, ok := headers[headerXmsDate]; ok {
		if auth == sharedKey || auth == sharedKeyLite {
			date = ""
		} else {
			date = xmsDate
		}
	}
	switch auth {
	case sharedKey:
		return strings.Join([]string{
			verb,
			headers[headerContentEncoding],
			headers[headerContentLanguage],
			contentLength,
			headers[headerContentMD5],
			headers[headerContentType],
			date,
			headers[headerIfModifiedSince],
			headers[headerIfMatch],
			headers[headerIfNoneMatch],
			headers[headerIfUnmodifiedSince],
			headers[headerRange],
			buildCanonicalizedHeader(headers),
			canonicalizedResource,
		}, "\n"), nil
	case sharedKeyForTable:
		return strings.Join([]string{
			verb,
			headers[headerContentMD5],
			headers[headerContentType],
			date,
			canonicalizedResource,
		}, "\n"), nil
	case sharedKeyLite:
		return strings.Join([]string{
			verb,
			headers[headerContentMD5],
			headers[headerContentType],
			date,
			buildCanonicalizedHeader(headers),
			canonicalizedResource,
		}, "\n"), nil
	case sharedKeyLiteForTable:
		return strings.Join([]string{
			date,
			canonicalizedResource,
		}, "\n"), nil
	default:
		return "", fmt.Errorf("%s authentication is not supported yet", auth)
	}
}
// buildCanonicalizedHeader assembles the CanonicalizedHeaders portion of the
// string-to-sign: every x-ms-* header (name lowercased and trimmed), sorted
// by name and joined as newline-separated "name:value" lines. Returns the
// empty string when no x-ms-* headers are present.
func buildCanonicalizedHeader(headers map[string]string) string {
	canonical := map[string]string{}
	for name, value := range headers {
		if lower := strings.TrimSpace(strings.ToLower(name)); strings.HasPrefix(lower, "x-ms-") {
			canonical[lower] = value
		}
	}
	if len(canonical) == 0 {
		return ""
	}
	keys := make([]string, 0, len(canonical))
	for key := range canonical {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	var out bytes.Buffer
	for i, key := range keys {
		if i > 0 {
			out.WriteRune('\n')
		}
		out.WriteString(key)
		out.WriteRune(':')
		out.WriteString(canonical[key])
	}
	return out.String()
}
// createAuthorizationHeader formats the Authorization header value as
// "<scheme> <account>:<signature>", where the signature is the HMAC of the
// canonicalized string computed by the client.
func (c *Client) createAuthorizationHeader(canonicalizedString string, auth authentication) string {
	var scheme string
	switch auth {
	case sharedKey, sharedKeyForTable:
		scheme = "SharedKey"
	case sharedKeyLite, sharedKeyLiteForTable:
		scheme = "SharedKeyLite"
	}
	return fmt.Sprintf("%s %s:%s", scheme, c.getCanonicalizedAccountName(), c.computeHmac256(canonicalizedString))
}

View File

@@ -1,632 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
// A Blob is an entry in BlobListResponse.
type Blob struct {
	// Container is the container holding this blob; its bsc field provides
	// the service client and auth used by the blob's request methods.
	Container  *Container
	Name       string         `xml:"Name"`
	Snapshot   time.Time      `xml:"Snapshot"`
	Properties BlobProperties `xml:"Properties"`
	Metadata   BlobMetadata   `xml:"Metadata"`
}
// PutBlobOptions includes the options any put blob operation
// (page, block, append)
//
// Fields carrying a `header:"..."` tag are merged into the request headers
// via headersFromStruct; Timeout is added to the request query instead.
type PutBlobOptions struct {
	// Timeout is forwarded to addTimeout when building the request query.
	Timeout           uint
	LeaseID           string     `header:"x-ms-lease-id"`
	Origin            string     `header:"Origin"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
// BlobMetadata is a set of custom name/value pairs.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx
type BlobMetadata map[string]string

// blobMetadataEntries is the wire form of a Metadata element: an arbitrary
// list of child elements, one per metadata pair.
type blobMetadataEntries struct {
	Entries []blobMetadataEntry `xml:",any"`
}

// blobMetadataEntry is one metadata pair on the wire: the element name
// carries the key and the character data carries the value.
type blobMetadataEntry struct {
	XMLName xml.Name
	Value   string `xml:",chardata"`
}
// UnmarshalXML converts the xml:Metadata into Metadata map
func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	var entries blobMetadataEntries
	if err := d.DecodeElement(&entries, &start); err != nil {
		return err
	}
	// Create the map lazily so a Metadata element with no children leaves
	// the receiver nil, exactly as before decoding.
	if len(entries.Entries) > 0 && *bm == nil {
		*bm = make(BlobMetadata)
	}
	for _, entry := range entries.Entries {
		(*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value
	}
	return nil
}
// MarshalXML implements the xml.Marshaler interface. It encodes
// metadata name/value pairs as they would appear in an Azure
// ListBlobs response.
func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
	var wrapper blobMetadataEntries
	wrapper.Entries = make([]blobMetadataEntry, 0, len(bm))
	for name, value := range bm {
		// Element names are canonicalized the way HTTP header names are.
		entry := blobMetadataEntry{
			XMLName: xml.Name{Local: http.CanonicalHeaderKey(name)},
			Value:   value,
		}
		wrapper.Entries = append(wrapper.Entries, entry)
	}
	return enc.EncodeElement(wrapper, start)
}
// BlobProperties contains various properties of a blob
// returned in various endpoints like ListBlobs or GetBlobProperties.
//
// The xml tags map elements of a ListBlobs response; the header tags
// are the x-ms-blob-* headers sent by SetProperties (via headersFromStruct).
type BlobProperties struct {
	LastModified    TimeRFC1123 `xml:"Last-Modified"`
	Etag            string      `xml:"Etag"`
	ContentMD5      string      `xml:"Content-MD5" header:"x-ms-blob-content-md5"`
	ContentLength   int64       `xml:"Content-Length"`
	ContentType     string      `xml:"Content-Type" header:"x-ms-blob-content-type"`
	ContentEncoding string      `xml:"Content-Encoding" header:"x-ms-blob-content-encoding"`
	CacheControl    string      `xml:"Cache-Control" header:"x-ms-blob-cache-control"`
	// Fixed xml tag: the ListBlobs response element is Content-Language
	// (matching the Content-Language header read by writeProperties);
	// the previous tag "Cache-Language" matched no element, so this
	// field was silently left empty when decoding list responses.
	ContentLanguage       string      `xml:"Content-Language" header:"x-ms-blob-content-language"`
	ContentDisposition    string      `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"`
	BlobType              BlobType    `xml:"BlobType"`
	SequenceNumber        int64       `xml:"x-ms-blob-sequence-number"`
	CopyID                string      `xml:"CopyId"`
	CopyStatus            string      `xml:"CopyStatus"`
	CopySource            string      `xml:"CopySource"`
	CopyProgress          string      `xml:"CopyProgress"`
	CopyCompletionTime    TimeRFC1123 `xml:"CopyCompletionTime"`
	CopyStatusDescription string      `xml:"CopyStatusDescription"`
	LeaseStatus           string      `xml:"LeaseStatus"`
	LeaseState            string      `xml:"LeaseState"`
	LeaseDuration         string      `xml:"LeaseDuration"`
	ServerEncrypted       bool        `xml:"ServerEncrypted"`
	IncrementalCopy       bool        `xml:"IncrementalCopy"`
}
// BlobType defines the type of the Azure Blob.
type BlobType string

// Types of blobs (returned in x-ms-blob-type and the BlobType list element)
const (
	// BlobTypeBlock is a blob composed of blocks (see PutBlock / PutBlockList).
	BlobTypeBlock BlobType = "BlockBlob"
	// BlobTypePage is a page blob (supports sequence numbers; see SetProperties).
	BlobTypePage BlobType = "PageBlob"
	// BlobTypeAppend is an append blob.
	BlobTypeAppend BlobType = "AppendBlob"
)
// buildPath returns the request path for this blob, rooted at its
// parent container's path.
func (b *Blob) buildPath() string {
	return fmt.Sprintf("%s/%s", b.Container.buildPath(), b.Name)
}
// Exists returns true if a blob with given name exists on the specified
// container of the storage account. It issues a HEAD request; any status
// other than 200 or 404 is surfaced as the transport error (if any).
func (b *Blob) Exists() (bool, error) {
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), nil)
	headers := b.Container.bsc.client.getStandardHeaders()
	resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth)
	if resp == nil {
		return false, err
	}
	defer drainRespBody(resp)
	switch resp.StatusCode {
	case http.StatusOK:
		return true, nil
	case http.StatusNotFound:
		return false, nil
	}
	return false, err
}
// GetURL gets the canonical URL to the blob with the specified name in the
// specified container.
// This method does not create a publicly accessible URL if the blob or container
// is private and this method does not check if the blob exists.
func (b *Blob) GetURL() string {
	// An empty container name addresses the implicit root container.
	name := b.Container.Name
	if len(name) == 0 {
		name = "$root"
	}
	return b.Container.bsc.client.getEndpoint(blobServiceName, pathForResource(name, b.Name), nil)
}
// GetBlobRangeOptions includes the options for a get blob range operation.
// The embedded *GetBlobOptions (headers, Timeout, Snapshot) applies to the
// same request.
type GetBlobRangeOptions struct {
	// Range, when non-nil, is sent as the HTTP Range header (see getRange).
	Range *BlobRange
	// GetRangeContentMD5 asks the service to return the MD5 of the range
	// (x-ms-range-get-content-md5: true).
	GetRangeContentMD5 bool
	*GetBlobOptions
}
// GetBlobOptions includes the options for a get blob operation.
// Header-tagged fields are copied onto the request by headersFromStruct;
// Timeout and Snapshot become query parameters (addTimeout / addSnapshot).
type GetBlobOptions struct {
	Timeout           uint
	Snapshot          *time.Time
	LeaseID           string     `header:"x-ms-lease-id"`
	Origin            string     `header:"Origin"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
// BlobRange represents the byte range to get. End == 0 means
// "from Start to the end of the blob".
type BlobRange struct {
	Start uint64
	End   uint64
}

// String renders the range in HTTP Range-header form, e.g.
// "bytes=10-100", or the open-ended "bytes=10-" when End is zero.
func (br BlobRange) String() string {
	if br.End != 0 {
		return fmt.Sprintf("bytes=%d-%d", br.Start, br.End)
	}
	return fmt.Sprintf("bytes=%d-", br.Start)
}
// Get returns a stream to read the blob. Caller must call both Read and Close()
// to correctly close the underlying connection.
//
// See the GetRange method for use with a Range header.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) {
	resp, err := b.getRange(&GetBlobRangeOptions{GetBlobOptions: options})
	if err != nil {
		return nil, err
	}
	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return nil, err
	}
	// A full read reports the whole blob, so Content-Length is refreshed
	// along with the other cached properties. The body is returned even
	// when property parsing fails so the caller can still drain/close it.
	err = b.writeProperties(resp.Header, true)
	return resp.Body, err
}
// GetRange reads the specified range of a blob to a stream. The bytesRange
// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec.
// Caller must call both Read and Close() to correctly close the underlying
// connection.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
func (b *Blob) GetRange(options *GetBlobRangeOptions) (io.ReadCloser, error) {
	resp, err := b.getRange(options)
	if err != nil {
		return nil, err
	}
	if err = checkRespCode(resp, []int{http.StatusPartialContent}); err != nil {
		return nil, err
	}
	// Content-Length must NOT be updated here: the service reports the
	// length of the returned range, which is not always the full blob
	// length. The body is returned even when property parsing fails so
	// the caller can still drain/close it.
	err = b.writeProperties(resp.Header, false)
	return resp.Body, err
}
// getRange issues the GET request shared by Get and GetRange, applying
// the optional Range header and the embedded GetBlobOptions.
func (b *Blob) getRange(options *GetBlobRangeOptions) (*http.Response, error) {
	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()
	if options != nil {
		if r := options.Range; r != nil {
			headers["Range"] = r.String()
			if options.GetRangeContentMD5 {
				headers["x-ms-range-get-content-md5"] = "true"
			}
		}
		if opts := options.GetBlobOptions; opts != nil {
			headers = mergeHeaders(headers, headersFromStruct(*opts))
			params = addTimeout(params, opts.Timeout)
			params = addSnapshot(params, opts.Snapshot)
		}
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
	resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// SnapshotOptions includes the options for a snapshot blob operation.
// Header-tagged fields are copied onto the request by headersFromStruct;
// Timeout becomes the "timeout" query parameter.
type SnapshotOptions struct {
	Timeout           uint
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
// CreateSnapshot creates a snapshot for a blob and returns the snapshot
// timestamp reported by the service in the x-ms-snapshot header.
// Any metadata set on b is attached to the snapshot.
// See https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx
func (b *Blob) CreateSnapshot(options *SnapshotOptions) (snapshotTimestamp *time.Time, err error) {
	params := url.Values{"comp": {"snapshot"}}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil || resp == nil {
		return nil, err
	}
	defer drainRespBody(resp)
	if err = checkRespCode(resp, []int{http.StatusCreated}); err != nil {
		return nil, err
	}
	raw := resp.Header.Get(http.CanonicalHeaderKey("x-ms-snapshot"))
	if raw == "" {
		return nil, errors.New("Snapshot not created")
	}
	ts, err := time.Parse(time.RFC3339, raw)
	if err != nil {
		return nil, err
	}
	return &ts, nil
}
// GetBlobPropertiesOptions includes the options for a get blob properties
// operation. Header-tagged fields are copied onto the request by
// headersFromStruct; Timeout and Snapshot become query parameters.
type GetBlobPropertiesOptions struct {
	Timeout           uint
	Snapshot          *time.Time
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
// GetProperties provides various information about the specified blob,
// refreshing b.Properties and b.Metadata from the response headers.
// See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx
func (b *Blob) GetProperties(options *GetBlobPropertiesOptions) error {
	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()
	if options != nil {
		params = addTimeout(params, options.Timeout)
		params = addSnapshot(params, options.Snapshot)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
	res, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(res)
	if err = checkRespCode(res, []int{http.StatusOK}); err != nil {
		return err
	}
	// HEAD returns the full blob's Content-Length, so it is safe to cache.
	return b.writeProperties(res.Header, true)
}
// writeProperties rebuilds b.Properties (and refreshes b.Metadata) from
// response headers. When includeContentLen is false the cached
// Content-Length is kept, because range responses report only the range
// length (see GetRange).
func (b *Blob) writeProperties(h http.Header, includeContentLen bool) error {
	contentLength := b.Properties.ContentLength
	if includeContentLen {
		if s := h.Get("Content-Length"); s != "" {
			parsed, err := strconv.ParseInt(s, 0, 64)
			if err != nil {
				return err
			}
			contentLength = parsed
		}
	}

	var sequenceNum int64
	if s := h.Get("x-ms-blob-sequence-number"); s != "" {
		parsed, err := strconv.ParseInt(s, 0, 64)
		if err != nil {
			return err
		}
		sequenceNum = parsed
	}

	lastModified, err := getTimeFromHeaders(h, "Last-Modified")
	if err != nil {
		return err
	}
	copyCompletionTime, err := getTimeFromHeaders(h, "x-ms-copy-completion-time")
	if err != nil {
		return err
	}

	// Replace the whole struct: fields absent from the headers are reset
	// to their zero values rather than keeping stale data.
	b.Properties = BlobProperties{
		LastModified:          TimeRFC1123(*lastModified),
		Etag:                  h.Get("Etag"),
		ContentMD5:            h.Get("Content-MD5"),
		ContentLength:         contentLength,
		ContentEncoding:       h.Get("Content-Encoding"),
		ContentType:           h.Get("Content-Type"),
		ContentDisposition:    h.Get("Content-Disposition"),
		CacheControl:          h.Get("Cache-Control"),
		ContentLanguage:       h.Get("Content-Language"),
		SequenceNumber:        sequenceNum,
		CopyCompletionTime:    TimeRFC1123(*copyCompletionTime),
		CopyStatusDescription: h.Get("x-ms-copy-status-description"),
		CopyID:                h.Get("x-ms-copy-id"),
		CopyProgress:          h.Get("x-ms-copy-progress"),
		CopySource:            h.Get("x-ms-copy-source"),
		CopyStatus:            h.Get("x-ms-copy-status"),
		BlobType:              BlobType(h.Get("x-ms-blob-type")),
		LeaseStatus:           h.Get("x-ms-lease-status"),
		LeaseState:            h.Get("x-ms-lease-state"),
	}
	b.writeMetadata(h)
	return nil
}
// SetBlobPropertiesOptions contains the options for SetProperties.
// Header-tagged fields are copied onto the request by headersFromStruct;
// SequenceNumberAction is only honored for page blobs (see SetProperties).
type SetBlobPropertiesOptions struct {
	Timeout              uint
	LeaseID              string     `header:"x-ms-lease-id"`
	Origin               string     `header:"Origin"`
	IfModifiedSince      *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince    *time.Time `header:"If-Unmodified-Since"`
	IfMatch              string     `header:"If-Match"`
	IfNoneMatch          string     `header:"If-None-Match"`
	SequenceNumberAction *SequenceNumberAction
	RequestID            string `header:"x-ms-client-request-id"`
}
// SequenceNumberAction defines how the blob's sequence number should be
// modified; sent as x-ms-sequence-number-action (see SetProperties).
type SequenceNumberAction string

// Options for sequence number action
const (
	SequenceNumberActionMax       SequenceNumberAction = "max"
	SequenceNumberActionUpdate    SequenceNumberAction = "update"
	// SequenceNumberActionIncrement tells the service to bump the number
	// itself; no x-ms-blob-sequence-number header is sent in that case.
	SequenceNumberActionIncrement SequenceNumberAction = "increment"
)
// SetProperties replaces the BlobHeaders for the specified blob.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetBlobProperties. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Blob-Properties
func (b *Blob) SetProperties(options *SetBlobPropertiesOptions) error {
	params := url.Values{"comp": {"properties"}}
	headers := mergeHeaders(b.Container.bsc.client.getStandardHeaders(), headersFromStruct(b.Properties))
	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	// Page blobs additionally carry a content length and, optionally, a
	// sequence-number action.
	if b.Properties.BlobType == BlobTypePage {
		headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("%v", b.Properties.ContentLength))
		if options != nil && options.SequenceNumberAction != nil {
			action := *options.SequenceNumberAction
			headers = addToHeaders(headers, "x-ms-sequence-number-action", string(action))
			// "increment" asks the service to compute the new number, so
			// no explicit sequence number accompanies it.
			if action != SequenceNumberActionIncrement {
				headers = addToHeaders(headers, "x-ms-blob-sequence-number", fmt.Sprintf("%v", b.Properties.SequenceNumber))
			}
		}
	}

	res, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(res)
	return checkRespCode(res, []int{http.StatusOK})
}
// SetBlobMetadataOptions includes the options for a set blob metadata
// operation. Header-tagged fields are copied onto the request by
// headersFromStruct; Timeout becomes the "timeout" query parameter.
type SetBlobMetadataOptions struct {
	Timeout           uint
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
// SetMetadata replaces the metadata for the specified blob with the
// contents of b.Metadata.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetBlobMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (b *Blob) SetMetadata(options *SetBlobMetadataOptions) error {
	params := url.Values{"comp": {"metadata"}}
	headers := b.Container.bsc.client.addMetadataToHeaders(b.Container.bsc.client.getStandardHeaders(), b.Metadata)
	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
	res, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(res)
	return checkRespCode(res, []int{http.StatusOK})
}
// GetBlobMetadataOptions includes the options for a get blob metadata
// operation. Header-tagged fields are copied onto the request by
// headersFromStruct; Timeout and Snapshot become query parameters.
type GetBlobMetadataOptions struct {
	Timeout           uint
	Snapshot          *time.Time
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
// GetMetadata fetches all user-defined metadata for the specified blob
// and stores it in b.Metadata.
//
// All metadata keys will be returned in lower case. (HTTP header
// names are case-insensitive.)
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (b *Blob) GetMetadata(options *GetBlobMetadataOptions) error {
	params := url.Values{"comp": {"metadata"}}
	headers := b.Container.bsc.client.getStandardHeaders()
	if options != nil {
		params = addTimeout(params, options.Timeout)
		params = addSnapshot(params, options.Snapshot)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
	res, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(res)
	if err = checkRespCode(res, []int{http.StatusOK}); err != nil {
		return err
	}
	b.writeMetadata(res.Header)
	return nil
}
// writeMetadata refreshes b.Metadata from the x-ms-meta-* response
// headers (keys lowercased, prefix stripped — see the package-level
// writeMetadata helper).
func (b *Blob) writeMetadata(h http.Header) {
	b.Metadata = BlobMetadata(writeMetadata(h))
}
// DeleteBlobOptions includes the options for a delete blob operation.
// Header-tagged fields are copied onto the request by headersFromStruct;
// Timeout and Snapshot become query parameters.
type DeleteBlobOptions struct {
	Timeout  uint
	Snapshot *time.Time
	LeaseID  string `header:"x-ms-lease-id"`
	// DeleteSnapshots: nil sends no directive; true deletes the blob and
	// its snapshots ("include"); false deletes only the snapshots ("only").
	// See the delete helper.
	DeleteSnapshots   *bool
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
// Delete deletes the given blob from the specified container.
// If the blob does not exist at the time of the Delete Blob operation, it
// returns an error.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob
func (b *Blob) Delete(options *DeleteBlobOptions) error {
	res, err := b.delete(options)
	if err != nil {
		return err
	}
	defer drainRespBody(res)
	// The service acknowledges a delete with 202 Accepted.
	return checkRespCode(res, []int{http.StatusAccepted})
}
// DeleteIfExists deletes the given blob from the specified container. If the
// blob is deleted with this call, returns true. Otherwise returns false
// (a 404 is not treated as an error).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob
func (b *Blob) DeleteIfExists(options *DeleteBlobOptions) (bool, error) {
	res, err := b.delete(options)
	if res == nil {
		return false, err
	}
	defer drainRespBody(res)
	switch res.StatusCode {
	case http.StatusAccepted:
		return true, nil
	case http.StatusNotFound:
		return false, nil
	}
	return false, err
}
// delete issues the DELETE request shared by Delete and DeleteIfExists,
// translating DeleteSnapshots into the x-ms-delete-snapshots directive.
func (b *Blob) delete(options *DeleteBlobOptions) (*http.Response, error) {
	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()
	if options != nil {
		params = addTimeout(params, options.Timeout)
		params = addSnapshot(params, options.Snapshot)
		headers = mergeHeaders(headers, headersFromStruct(*options))
		if ds := options.DeleteSnapshots; ds != nil {
			mode := "only"
			if *ds {
				mode = "include"
			}
			headers["x-ms-delete-snapshots"] = mode
		}
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
	return b.Container.bsc.client.exec(http.MethodDelete, uri, headers, nil, b.Container.bsc.auth)
}
// pathForResource constructs the request path for a container
// ("/container") or a blob within it ("/container/name").
func pathForResource(container, name string) string {
	if name == "" {
		return "/" + container
	}
	return "/" + container + "/" + name
}
// respondCreation consumes a creation response: it drains the body,
// requires 201 Created, and on success records the created blob type.
func (b *Blob) respondCreation(resp *http.Response, bt BlobType) error {
	defer drainRespBody(resp)
	if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil {
		return err
	}
	b.Properties.BlobType = bt
	return nil
}

View File

@@ -1,179 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"errors"
"fmt"
"net/url"
"strings"
"time"
)
// OverrideHeaders defines overridable response headers in
// a request using a SAS URI. They are encoded as the rscc/rscd/rsce/
// rscl/rsct query parameters and folded into the string-to-sign
// (see blobAndFileSASURI and blobSASStringToSign).
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
type OverrideHeaders struct {
	CacheControl       string
	ContentDisposition string
	ContentEncoding    string
	ContentLanguage    string
	ContentType        string
}
// BlobSASOptions are options to construct a blob SAS URI:
// the permissions granted, the overridable response headers, and the
// general SAS options (start/expiry, identifier, IP, protocol).
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
type BlobSASOptions struct {
	BlobServiceSASPermissions
	OverrideHeaders
	SASOptions
}
// BlobServiceSASPermissions includes the available permissions for
// a blob service SAS URI.
type BlobServiceSASPermissions struct {
	Read   bool
	Add    bool
	Create bool
	Write  bool
	Delete bool
}

// buildString renders the enabled permissions as the "sp" parameter
// value, always in the service-mandated order r,a,c,w,d.
func (p BlobServiceSASPermissions) buildString() string {
	flags := []struct {
		enabled bool
		letter  string
	}{
		{p.Read, "r"},
		{p.Add, "a"},
		{p.Create, "c"},
		{p.Write, "w"},
		{p.Delete, "d"},
	}
	out := ""
	for _, f := range flags {
		if f.enabled {
			out += f.letter
		}
	}
	return out
}
// GetSASURI creates a URL to the blob which contains the Shared
// Access Signature with the specified options.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
func (b *Blob) GetSASURI(options BlobSASOptions) (string, error) {
	// "b" marks the signed resource as a blob (as opposed to a container).
	const signedResource = "b"
	uri := b.GetURL()
	canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(uri, b.Container.bsc.auth, true)
	if err != nil {
		return "", err
	}
	perms := options.BlobServiceSASPermissions.buildString()
	return b.Container.bsc.client.blobAndFileSASURI(options.SASOptions, uri, perms, canonicalizedResource, signedResource, options.OverrideHeaders)
}
// blobAndFileSASURI assembles a service SAS URI for a blob or file
// resource: it builds the string-to-sign, HMAC-signs it with the account
// key, and attaches the SAS query parameters to uri.
func (c *Client) blobAndFileSASURI(options SASOptions, uri, permissions, canonicalizedResource, signedResource string, headers OverrideHeaders) (string, error) {
	// An unset start time is omitted from both the signature and the URI.
	start := ""
	if options.Start != (time.Time{}) {
		start = options.Start.UTC().Format(time.RFC3339)
	}
	expiry := options.Expiry.UTC().Format(time.RFC3339)
	// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
	canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
	canonicalizedResource, err := url.QueryUnescape(canonicalizedResource)
	if err != nil {
		return "", err
	}
	protocols := ""
	if options.UseHTTPS {
		protocols = "https"
	}
	stringToSign, err := blobSASStringToSign(permissions, start, expiry, canonicalizedResource, options.Identifier, options.IP, protocols, c.apiVersion, signedResource, "", headers)
	if err != nil {
		return "", err
	}
	sig := c.computeHmac256(stringToSign)
	sasParams := url.Values{
		"sv":  {c.apiVersion},
		"se":  {expiry},
		"sr":  {signedResource},
		"sp":  {permissions},
		"sig": {sig},
	}
	if start != "" {
		sasParams.Add("st", start)
	}
	// spr (allowed protocols) and sip (source-IP restriction) are only
	// emitted for service versions that understand them.
	if c.apiVersion >= "2015-04-05" {
		if protocols != "" {
			sasParams.Add("spr", protocols)
		}
		if options.IP != "" {
			sasParams.Add("sip", options.IP)
		}
	}
	// Add override response headers (rsc* parameters).
	addQueryParameter(sasParams, "rscc", headers.CacheControl)
	addQueryParameter(sasParams, "rscd", headers.ContentDisposition)
	addQueryParameter(sasParams, "rsce", headers.ContentEncoding)
	addQueryParameter(sasParams, "rscl", headers.ContentLanguage)
	addQueryParameter(sasParams, "rsct", headers.ContentType)
	sasURL, err := url.Parse(uri)
	if err != nil {
		return "", err
	}
	sasURL.RawQuery = sasParams.Encode()
	return sasURL.String(), nil
}
// blobSASStringToSign builds the newline-joined string-to-sign for a
// blob/file service SAS. The field set (and thus the format) depends on
// the service version being signed against; versions older than
// 2013-08-15 are not supported.
func blobSASStringToSign(signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, signedResource, signedSnapshotTime string, headers OverrideHeaders) (string, error) {
	rscc := headers.CacheControl
	rscd := headers.ContentDisposition
	rsce := headers.ContentEncoding
	rscl := headers.ContentLanguage
	rsct := headers.ContentType

	// From 2015-02-21 onward the canonical resource is service-prefixed.
	if signedVersion >= "2015-02-21" {
		canonicalizedResource = "/blob" + canonicalizedResource
	}

	switch {
	// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
	case signedVersion >= "2018-11-09":
		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, signedResource, signedSnapshotTime, rscc, rscd, rsce, rscl, rsct), nil
	// https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
	case signedVersion >= "2015-04-05":
		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
	// reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
	case signedVersion >= "2013-08-15":
		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
	}
	return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
}

View File

@@ -1,186 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
)
// BlobStorageClient contains operations for Microsoft Azure Blob Storage
// Service. It wraps the shared Client together with the authentication
// scheme used for blob requests.
type BlobStorageClient struct {
	client Client
	auth   authentication
}
// GetServiceProperties gets the properties of your storage account's blob service.
// Thin wrapper over the shared client implementation.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties
func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) {
	return b.client.getServiceProperties(blobServiceName, b.auth)
}
// SetServiceProperties sets the properties of your storage account's blob service.
// Thin wrapper over the shared client implementation.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties
func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error {
	return b.client.setServiceProperties(props, blobServiceName, b.auth)
}
// ListContainersParameters defines the set of customizable parameters to make a
// List Containers call. Zero-valued fields are omitted from the query
// string (see getParameters).
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
type ListContainersParameters struct {
	Prefix     string
	Marker     string
	Include    string
	MaxResults uint
	Timeout    uint
}
// GetContainerReference returns a Container object for the specified
// container name. No request is made; the container may not exist.
func (b *BlobStorageClient) GetContainerReference(name string) *Container {
	c := &Container{
		bsc:  b,
		Name: name,
	}
	return c
}
// GetContainerReferenceFromSASURI returns a Container object for the
// specified container SAS URI. The container name is taken from the
// first path segment; the SAS credentials come from the URI's query.
func GetContainerReferenceFromSASURI(sasuri url.URL) (*Container, error) {
	segments := strings.Split(sasuri.Path, "/")
	if len(segments) < 2 {
		return nil, fmt.Errorf("could not find a container in URI: %s", sasuri.String())
	}
	client, err := newSASClientFromURL(&sasuri)
	if err != nil {
		return nil, err
	}
	svc := client.GetBlobService()
	return &Container{
		bsc:    &svc,
		Name:   segments[1],
		sasuri: sasuri,
	}, nil
}
// ListContainers returns the list of containers in a storage account along with
// pagination token and other response details.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) {
	q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
	uri := b.client.getEndpoint(blobServiceName, "", q)
	headers := b.client.getStandardHeaders()

	// Decode into local shadow types so the XML decoder never touches
	// the unexported fields of Container (bsc, sasuri); they are filled
	// in explicitly afterwards.
	type containerAlias struct {
		bsc        *BlobStorageClient
		Name       string              `xml:"Name"`
		Properties ContainerProperties `xml:"Properties"`
		Metadata   BlobMetadata
		sasuri     url.URL
	}
	type listResponseAlias struct {
		XMLName    xml.Name         `xml:"EnumerationResults"`
		Xmlns      string           `xml:"xmlns,attr"`
		Prefix     string           `xml:"Prefix"`
		Marker     string           `xml:"Marker"`
		NextMarker string           `xml:"NextMarker"`
		MaxResults int64            `xml:"MaxResults"`
		Containers []containerAlias `xml:"Containers>Container"`
	}

	resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var decoded listResponseAlias
	if err = xmlUnmarshal(resp.Body, &decoded); err != nil {
		return nil, err
	}

	out := ContainerListResponse{
		XMLName:    decoded.XMLName,
		Xmlns:      decoded.Xmlns,
		Prefix:     decoded.Prefix,
		Marker:     decoded.Marker,
		NextMarker: decoded.NextMarker,
		MaxResults: decoded.MaxResults,
		Containers: make([]Container, len(decoded.Containers)),
	}
	for i := range decoded.Containers {
		src := &decoded.Containers[i]
		out.Containers[i] = Container{
			bsc:        &b,
			Name:       src.Name,
			Properties: src.Properties,
			Metadata:   map[string]string(src.Metadata),
			sasuri:     src.sasuri,
		}
	}
	return &out, nil
}
// getParameters translates the parameters into their query-string form,
// omitting zero-valued fields entirely.
func (p ListContainersParameters) getParameters() url.Values {
	out := url.Values{}
	setIfPresent := func(key, value string) {
		if value != "" {
			out.Set(key, value)
		}
	}
	setIfPresent("prefix", p.Prefix)
	setIfPresent("marker", p.Marker)
	setIfPresent("include", p.Include)
	if p.MaxResults > 0 {
		out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
	}
	if p.Timeout > 0 {
		out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
	}
	return out
}
// writeMetadata extracts the user-defined metadata from response
// headers: keys are lowercased, the x-ms-meta- prefix is stripped, and
// for repeated headers the last value wins.
func writeMetadata(h http.Header) map[string]string {
	// Can't trust CanonicalHeaderKey() to munge case
	// reliably. "_" is allowed in identifiers:
	// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
	// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
	// http://tools.ietf.org/html/rfc7230#section-3.2
	// ...but "_" is considered invalid by
	// CanonicalMIMEHeaderKey in
	// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
	// so a key can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl";
	// lowercase everything before comparing.
	prefix := strings.ToLower(userDefinedMetadataHeaderPrefix)
	metadata := make(map[string]string)
	for name, values := range h {
		key := strings.ToLower(name)
		if len(values) == 0 || !strings.HasPrefix(key, prefix) {
			continue
		}
		// metadata["lol"] = content of the last X-Ms-Meta-Lol header
		metadata[key[len(userDefinedMetadataHeaderPrefix):]] = values[len(values)-1]
	}
	return metadata
}

View File

@@ -1,311 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
// BlockListType is used to filter out types of blocks in a Get Blocks List call
// for a block blob.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all
// block types.
type BlockListType string

// Filters for listing blocks in block blobs
const (
	BlockListTypeAll         BlockListType = "all"
	BlockListTypeCommitted   BlockListType = "committed"
	BlockListTypeUncommitted BlockListType = "uncommitted"
)
// Maximum sizes (per REST API) for various concepts
const (
	// MaxBlobBlockSize is the largest single block the service accepts
	// in a PutBlock call (100 MiB).
	MaxBlobBlockSize = 100 * 1024 * 1024
	// MaxBlobPageSize is the largest page-blob write accepted per
	// request (4 MiB).
	MaxBlobPageSize = 4 * 1024 * 1024
)
// BlockStatus defines states a block for a block blob can
// be in, as used when committing a block list.
type BlockStatus string

// List of statuses that can be used to refer to a block in a block list
const (
	BlockStatusUncommitted BlockStatus = "Uncommitted"
	BlockStatusCommitted   BlockStatus = "Committed"
	// BlockStatusLatest refers to whichever version of the block was
	// uploaded most recently, committed or not.
	BlockStatusLatest BlockStatus = "Latest"
)
// Block is used to create Block entities for a Put Block List
// call: the block's ID together with where the service should look
// for it (committed, uncommitted, or latest).
type Block struct {
	ID     string
	Status BlockStatus
}
// BlockListResponse contains the response fields from a Get Block List
// call, split into committed and uncommitted blocks.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
type BlockListResponse struct {
	XMLName           xml.Name        `xml:"BlockList"`
	CommittedBlocks   []BlockResponse `xml:"CommittedBlocks>Block"`
	UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"`
}
// BlockResponse contains the information for a single block returned
// in the GetBlockList call: its name (ID) and size in bytes.
type BlockResponse struct {
	Name string `xml:"Name"`
	Size int64  `xml:"Size"`
}
// CreateBlockBlob initializes an empty block blob with no blocks.
// It is a convenience wrapper that delegates to CreateBlockBlobFromReader
// with a nil reader (size 0).
//
// See CreateBlockBlobFromReader for more info on creating blobs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error {
	return b.CreateBlockBlobFromReader(nil, options)
}
// CreateBlockBlobFromReader initializes a block blob using data from
// reader. Size must be the number of bytes read from reader. To
// create an empty blob, use size==0 and reader==nil.
//
// Any headers set in blob.Properties or metadata in blob.Metadata
// will be set on the blob.
//
// The API rejects requests with size > 256 MiB (but this limit is not
// checked by the SDK). To write a larger blob, use CreateBlockBlob,
// PutBlock, and PutBlockList.
//
// To create a blob from scratch, call container.GetBlobReference() to
// get an empty blob, fill in blob.Properties and blob.Metadata as
// appropriate then call this method.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error {
	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypeBlock)
	headers["Content-Length"] = "0"

	var size int64
	if blob != nil {
		// Determine the payload size without consuming the reader when
		// possible; otherwise buffer the whole payload to count it.
		type lener interface {
			Len() int
		}
		// TODO(rjeczalik): handle io.ReadSeeker, in case blob is *os.File etc.
		if l, ok := blob.(lener); ok {
			size = int64(l.Len())
		} else {
			buf := &bytes.Buffer{}
			copied, err := io.Copy(buf, blob)
			if err != nil {
				return err
			}
			size = copied
			blob = buf
		}
		headers["Content-Length"] = strconv.FormatInt(size, 10)
	}
	b.Properties.ContentLength = size

	headers = mergeHeaders(headers, headersFromStruct(b.Properties))
	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	return b.respondCreation(resp, BlobTypeBlock)
}
// PutBlockOptions includes the options for a put block operation
type PutBlockOptions struct {
	Timeout    uint   // server-side timeout, in seconds, added as a query parameter
	LeaseID    string `header:"x-ms-lease-id"`             // required if the blob has an active lease
	ContentMD5 string `header:"Content-MD5"`               // optional transactional MD5 of the block content
	RequestID  string `header:"x-ms-client-request-id"`    // client-generated ID for request correlation
}
// PutBlock saves the given data chunk to the specified block blob with
// given ID.
//
// The API rejects chunks larger than 100 MiB (but this limit is not
// checked by the SDK).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
func (b *Blob) PutBlock(blockID string, chunk []byte, options *PutBlockOptions) error {
	// Convenience wrapper: the chunk length is known, so delegate to the
	// streaming variant with an in-memory reader.
	return b.PutBlockWithLength(blockID, uint64(len(chunk)), bytes.NewReader(chunk), options)
}
// PutBlockWithLength saves the given data stream of exactly specified size to
// the block blob with given ID. It is an alternative to PutBlocks where data
// comes as stream but the length is known in advance.
//
// The API rejects requests with size > 100 MiB (but this limit is not
// checked by the SDK).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, options *PutBlockOptions) error {
	// Identify both the operation ("block") and the target block ID.
	query := url.Values{
		"comp":    {"block"},
		"blockid": {blockID},
	}
	client := b.Container.bsc.client
	hdrs := client.getStandardHeaders()
	hdrs["Content-Length"] = strconv.FormatUint(size, 10)
	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}
	endpoint := client.getEndpoint(blobServiceName, b.buildPath(), query)
	resp, err := client.exec(http.MethodPut, endpoint, hdrs, blob, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	return b.respondCreation(resp, BlobTypeBlock)
}
// PutBlockFromURLOptions includes the options for a put block from URL operation
type PutBlockFromURLOptions struct {
	PutBlockOptions // embeds timeout, lease and request-id options

	SourceContentMD5   string `header:"x-ms-source-content-md5"`   // optional MD5 the service verifies against the copied range
	SourceContentCRC64 string `header:"x-ms-source-content-crc64"` // optional CRC64 the service verifies against the copied range
}
// PutBlockFromURL copy data of exactly specified size from specified URL to
// the block blob with given ID. It is an alternative to PutBlocks where data
// comes from a remote URL and the offset and length is known in advance.
//
// The API rejects requests with size > 100 MiB (but this limit is not
// checked by the SDK).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url
func (b *Blob) PutBlockFromURL(blockID string, blobURL string, offset int64, size uint64, options *PutBlockFromURLOptions) error {
	query := url.Values{
		"comp":    {"block"},
		"blockid": {blockID},
	}
	headers := b.Container.bsc.client.getStandardHeaders()
	// The value of this header must be set to zero.
	// When the length is not zero, the operation will fail with the status code 400 (Bad Request).
	headers["Content-Length"] = "0"
	headers["x-ms-copy-source"] = blobURL
	// Inclusive byte range of the source to copy.
	// NOTE(review): size==0 would underflow the end-of-range computation;
	// callers are expected to pass a positive size — confirm.
	headers["x-ms-source-range"] = fmt.Sprintf("bytes=%d-%d", offset, uint64(offset)+size-1)
	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	return b.respondCreation(resp, BlobTypeBlock)
}
// PutBlockListOptions includes the options for a put block list operation
type PutBlockListOptions struct {
	Timeout           uint       // server-side timeout, in seconds, added as a query parameter
	LeaseID           string     `header:"x-ms-lease-id"`          // required if the blob has an active lease
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`      // conditional: only if modified since this time
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`    // conditional: only if unmodified since this time
	IfMatch           string     `header:"If-Match"`               // conditional: only if ETag matches
	IfNoneMatch       string     `header:"If-None-Match"`          // conditional: only if ETag does not match
	RequestID         string     `header:"x-ms-client-request-id"` // client-generated ID for request correlation
}
// PutBlockList saves list of blocks to the specified block blob.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block-List
func (b *Blob) PutBlockList(blocks []Block, options *PutBlockListOptions) error {
	query := url.Values{"comp": {"blocklist"}}
	// Serialize the block list into the XML payload the service expects.
	payload := prepareBlockListRequest(blocks)

	client := b.Container.bsc.client
	hdrs := client.getStandardHeaders()
	hdrs["Content-Length"] = strconv.Itoa(len(payload))
	// Blob properties and user metadata are (re)applied on commit.
	hdrs = mergeHeaders(hdrs, headersFromStruct(b.Properties))
	hdrs = client.addMetadataToHeaders(hdrs, b.Metadata)
	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}

	endpoint := client.getEndpoint(blobServiceName, b.buildPath(), query)
	resp, err := client.exec(http.MethodPut, endpoint, hdrs, strings.NewReader(payload), b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusCreated})
}
// GetBlockListOptions includes the options for a get block list operation
type GetBlockListOptions struct {
	Timeout   uint       // server-side timeout, in seconds, added as a query parameter
	Snapshot  *time.Time // if set, lists blocks of the given snapshot instead of the base blob
	LeaseID   string     `header:"x-ms-lease-id"`          // required if the blob has an active lease
	RequestID string     `header:"x-ms-client-request-id"` // client-generated ID for request correlation
}
// GetBlockList retrieves list of blocks in the specified block blob.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Block-List
func (b *Blob) GetBlockList(blockType BlockListType, options *GetBlockListOptions) (BlockListResponse, error) {
	var result BlockListResponse

	// blocklisttype selects committed, uncommitted, or all blocks.
	query := url.Values{
		"comp":          {"blocklist"},
		"blocklisttype": {string(blockType)},
	}
	client := b.Container.bsc.client
	hdrs := client.getStandardHeaders()
	if options != nil {
		query = addTimeout(query, options.Timeout)
		query = addSnapshot(query, options.Snapshot)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}

	endpoint := client.getEndpoint(blobServiceName, b.buildPath(), query)
	resp, err := client.exec(http.MethodGet, endpoint, hdrs, nil, b.Container.bsc.auth)
	if err != nil {
		return result, err
	}
	defer resp.Body.Close()

	err = xmlUnmarshal(resp.Body, &result)
	return result, err
}

View File

@@ -1,991 +0,0 @@
// Package storage provides clients for Microsoft Azure Storage Services.
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bufio"
"encoding/base64"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"mime/multipart"
"net/http"
"net/url"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/version"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
)
const (
	// DefaultBaseURL is the domain name used for storage requests in the
	// public cloud when a default client is created.
	DefaultBaseURL = "core.windows.net"

	// DefaultAPIVersion is the Azure Storage API version string used when a
	// basic client is created.
	DefaultAPIVersion = "2018-03-28"

	defaultUseHTTPS      = true
	defaultRetryAttempts = 5
	defaultRetryDuration = time.Second * 5

	// StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator
	StorageEmulatorAccountName = "devstoreaccount1"

	// StorageEmulatorAccountKey is the fixed storage account key used by Azure Storage Emulator
	StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="

	// Service name fragments used when building per-service endpoint URLs.
	blobServiceName  = "blob"
	tableServiceName = "table"
	queueServiceName = "queue"
	fileServiceName  = "file"

	// Fixed host:port endpoints served by the local storage emulator.
	storageEmulatorBlob  = "127.0.0.1:10000"
	storageEmulatorTable = "127.0.0.1:10002"
	storageEmulatorQueue = "127.0.0.1:10001"

	userAgentHeader = "User-Agent"

	// Prefix applied to user-defined metadata keys when sent as HTTP headers.
	userDefinedMetadataHeaderPrefix = "x-ms-meta-"

	// Recognized (lower-cased) keys of a storage connection string.
	connectionStringAccountName      = "accountname"
	connectionStringAccountKey       = "accountkey"
	connectionStringEndpointSuffix   = "endpointsuffix"
	connectionStringEndpointProtocol = "defaultendpointsprotocol"

	connectionStringBlobEndpoint  = "blobendpoint"
	connectionStringFileEndpoint  = "fileendpoint"
	connectionStringQueueEndpoint = "queueendpoint"
	connectionStringTableEndpoint = "tableendpoint"
	connectionStringSAS           = "sharedaccesssignature"
)
var (
	// validStorageAccount enforces the account-name rules: 3-24 characters,
	// lowercase letters and digits only.
	validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$")

	// defaultValidStatusCodes are the HTTP statuses the DefaultSender retries.
	defaultValidStatusCodes = []int{
		http.StatusRequestTimeout,      // 408
		http.StatusInternalServerError, // 500
		http.StatusBadGateway,          // 502
		http.StatusServiceUnavailable,  // 503
		http.StatusGatewayTimeout,      // 504
	}
)
// Sender sends a request
type Sender interface {
	Send(*Client, *http.Request) (*http.Response, error)
}

// DefaultSender is the default sender for the client. It implements
// an automatic retry strategy.
type DefaultSender struct {
	RetryAttempts    int           // maximum number of attempts per request
	RetryDuration    time.Duration // base duration used for backoff between attempts
	ValidStatusCodes []int         // HTTP statuses that trigger a retry
	attempts         int           // used for testing
}
// Send is the default retry strategy in the client
func (ds *DefaultSender) Send(c *Client, req *http.Request) (resp *http.Response, err error) {
	// Wrap the request so its body can be rewound and replayed on retry.
	rr := autorest.NewRetriableRequest(req)
	for attempts := 0; attempts < ds.RetryAttempts; attempts++ {
		err = rr.Prepare()
		if err != nil {
			return resp, err
		}
		resp, err = c.HTTPClient.Do(rr.Request())
		// Return immediately on transport errors and on any status that is
		// not in the retryable set.
		if err != nil || !autorest.ResponseHasStatusCode(resp, ds.ValidStatusCodes...) {
			return resp, err
		}
		// Retryable status: drain the body so the connection can be reused,
		// then back off before the next attempt.
		drainRespBody(resp)
		// NOTE(review): req.Cancel is deprecated in net/http — confirm before modernizing.
		autorest.DelayForBackoff(ds.RetryDuration, attempts, req.Cancel)
		ds.attempts = attempts
	}
	// Record the total number of attempts for tests.
	ds.attempts++
	return resp, err
}
// Client is the object that needs to be constructed to perform
// operations on the storage account.
type Client struct {
	// HTTPClient is the http.Client used to initiate API
	// requests. http.DefaultClient is used when creating a
	// client.
	HTTPClient *http.Client

	// Sender is an interface that sends the request. Clients are
	// created with a DefaultSender. The DefaultSender has an
	// automatic retry strategy built in. The Sender can be customized.
	Sender Sender

	accountName      string     // storage account name
	accountKey       []byte     // decoded shared key (empty for SAS clients)
	useHTTPS         bool       // whether endpoints use the https scheme
	UseSharedKeyLite bool       // selects SharedKeyLite instead of SharedKey auth
	baseURL          string     // endpoint suffix, e.g. core.windows.net
	apiVersion       string     // x-ms-version sent with each request
	userAgent        string     // User-Agent sent with each request
	sasClient        bool       // true when authorized via a SAS token
	accountSASToken  url.Values // account SAS query parameters, nil for service SAS
}
// odataResponse pairs a raw HTTP response with its decoded OData error, if any.
type odataResponse struct {
	resp  *http.Response
	odata odataErrorWrapper
}

// AzureStorageServiceError contains fields of the error response from
// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx
// Some fields might be specific to certain calls.
type AzureStorageServiceError struct {
	Code                      string `xml:"Code"`
	Message                   string `xml:"Message"`
	AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"`
	QueryParameterName        string `xml:"QueryParameterName"`
	QueryParameterValue       string `xml:"QueryParameterValue"`
	Reason                    string `xml:"Reason"`
	Lang                      string
	StatusCode                int    // HTTP status, filled in from the response
	RequestID                 string // x-ms-request-id, filled in from the response headers
	Date                      string
	APIVersion                string
}

// odataErrorMessage is the localized message inside an OData error payload.
type odataErrorMessage struct {
	Lang  string `json:"lang"`
	Value string `json:"value"`
}

// odataError is the code/message pair inside an OData error payload.
type odataError struct {
	Code    string            `json:"code"`
	Message odataErrorMessage `json:"message"`
}

// odataErrorWrapper is the top-level envelope of an OData error payload.
type odataErrorWrapper struct {
	Err odataError `json:"odata.error"`
}
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
	allowed []int
	got     int
	inner   error
}

// Error renders the received status, the set of acceptable statuses, and any
// wrapped service error.
func (e UnexpectedStatusCodeError) Error() string {
	describe := func(code int) string { return fmt.Sprintf("%d %s", code, http.StatusText(code)) }
	wanted := make([]string, 0, len(e.allowed))
	for _, code := range e.allowed {
		wanted = append(wanted, describe(code))
	}
	return fmt.Sprintf("storage: status code from service response is %s; was expecting %s. Inner error: %+v", describe(e.got), strings.Join(wanted, " or "), e.inner)
}

// Got is the actual status code returned by Azure.
func (e UnexpectedStatusCodeError) Got() int {
	return e.got
}

// Inner returns any inner error info.
func (e UnexpectedStatusCodeError) Inner() error {
	return e.inner
}
// NewClientFromConnectionString creates a Client from the connection string.
func NewClientFromConnectionString(input string) (Client, error) {
	// build a map of connection string key/value pairs
	parts := map[string]string{}
	for _, pair := range strings.Split(input, ";") {
		if pair == "" {
			continue
		}
		// Split on the first '=' only; account keys contain '=' padding.
		equalDex := strings.IndexByte(pair, '=')
		if equalDex <= 0 {
			return Client{}, fmt.Errorf("Invalid connection segment %q", pair)
		}
		value := strings.TrimSpace(pair[equalDex+1:])
		key := strings.TrimSpace(strings.ToLower(pair[:equalDex]))
		parts[key] = value
	}

	// TODO: validate parameter sets?

	if parts[connectionStringAccountName] == StorageEmulatorAccountName {
		return NewEmulatorClient()
	}

	if parts[connectionStringSAS] != "" {
		// SAS connection string: pick the first service endpoint present
		// (blob, file, queue, then table) as the client endpoint.
		endpoint := ""
		if parts[connectionStringBlobEndpoint] != "" {
			endpoint = parts[connectionStringBlobEndpoint]
		} else if parts[connectionStringFileEndpoint] != "" {
			endpoint = parts[connectionStringFileEndpoint]
		} else if parts[connectionStringQueueEndpoint] != "" {
			endpoint = parts[connectionStringQueueEndpoint]
		} else {
			endpoint = parts[connectionStringTableEndpoint]
		}
		return NewAccountSASClientFromEndpointToken(endpoint, parts[connectionStringSAS])
	}

	useHTTPS := defaultUseHTTPS
	if parts[connectionStringEndpointProtocol] != "" {
		useHTTPS = parts[connectionStringEndpointProtocol] == "https"
	}

	return NewClient(parts[connectionStringAccountName], parts[connectionStringAccountKey],
		parts[connectionStringEndpointSuffix], DefaultAPIVersion, useHTTPS)
}
// NewBasicClient constructs a Client with given storage service name and
// key, using the public-cloud base URL, the default API version and HTTPS.
func NewBasicClient(accountName, accountKey string) (Client, error) {
	// The emulator account is handled specially (fixed key, local endpoints).
	if accountName == StorageEmulatorAccountName {
		return NewEmulatorClient()
	}
	return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
}
// NewBasicClientOnSovereignCloud constructs a Client with given storage service name and
// key in the referenced cloud, taking the endpoint suffix from the environment.
func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) {
	if accountName == StorageEmulatorAccountName {
		return NewEmulatorClient()
	}
	return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS)
}
// NewEmulatorClient constructs a Client intended to only work with Azure
// Storage Emulator (fixed well-known account name and key, HTTP only).
func NewEmulatorClient() (Client, error) {
	return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false)
}
// NewClient constructs a Client. This should be used if the caller wants
// to specify whether to use HTTPS, a specific REST API version or a custom
// storage endpoint than Azure Public Cloud.
func NewClient(accountName, accountKey, serviceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
	var c Client
	// Validate inputs before doing any work.
	if !IsValidStorageAccount(accountName) {
		return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: %v", accountName)
	} else if accountKey == "" {
		return c, fmt.Errorf("azure: account key required")
	} else if serviceBaseURL == "" {
		return c, fmt.Errorf("azure: base storage service url required")
	}

	// Shared keys are transmitted base64-encoded; decode once here.
	key, err := base64.StdEncoding.DecodeString(accountKey)
	if err != nil {
		return c, fmt.Errorf("azure: malformed storage account key: %v", err)
	}

	c = Client{
		HTTPClient:       http.DefaultClient,
		accountName:      accountName,
		accountKey:       key,
		useHTTPS:         useHTTPS,
		baseURL:          serviceBaseURL,
		apiVersion:       apiVersion,
		sasClient:        false,
		UseSharedKeyLite: false,
		Sender: &DefaultSender{
			RetryAttempts:    defaultRetryAttempts,
			ValidStatusCodes: defaultValidStatusCodes,
			RetryDuration:    defaultRetryDuration,
		},
	}
	c.userAgent = c.getDefaultUserAgent()
	return c, nil
}
// IsValidStorageAccount checks if the storage account name is valid
// (3-24 characters of lowercase letters and digits).
// See https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account
func IsValidStorageAccount(account string) bool {
	return validStorageAccount.MatchString(account)
}
// NewAccountSASClient constructs a client that uses accountSAS authorization
// for its operations, with the endpoint suffix taken from the environment.
func NewAccountSASClient(account string, token url.Values, env azure.Environment) Client {
	return newSASClient(account, env.StorageEndpointSuffix, token)
}
// NewAccountSASClientFromEndpointToken constructs a client that uses accountSAS authorization
// for its operations using the specified endpoint and SAS token.
func NewAccountSASClientFromEndpointToken(endpoint string, sasToken string) (Client, error) {
	u, err := url.Parse(endpoint)
	if err != nil {
		return Client{}, err
	}
	// Validate that the token parses as query parameters before using it.
	_, err = url.ParseQuery(sasToken)
	if err != nil {
		return Client{}, err
	}
	// Attach the token to the URL so account/base URL can be derived from it.
	u.RawQuery = sasToken
	return newSASClientFromURL(u)
}
// newSASClient builds a SAS-authorized Client for the given account and
// endpoint suffix, deriving API version and protocol from the token itself.
func newSASClient(accountName, baseURL string, sasToken url.Values) Client {
	c := Client{
		HTTPClient: http.DefaultClient,
		apiVersion: DefaultAPIVersion,
		sasClient:  true,
		Sender: &DefaultSender{
			RetryAttempts:    defaultRetryAttempts,
			ValidStatusCodes: defaultValidStatusCodes,
			RetryDuration:    defaultRetryDuration,
		},
		accountName:     accountName,
		baseURL:         baseURL,
		accountSASToken: sasToken,
		useHTTPS:        defaultUseHTTPS,
	}
	c.userAgent = c.getDefaultUserAgent()
	// Get API version and protocol from token
	c.apiVersion = sasToken.Get("sv")
	// spr restricts the allowed protocol(s); "https" forces HTTPS-only.
	if spr := sasToken.Get("spr"); spr != "" {
		c.useHTTPS = spr == "https"
	}
	return c
}
// newSASClientFromURL derives the account name and base URL from a service
// endpoint URL and builds a SAS client from its query-string token.
func newSASClientFromURL(u *url.URL) (Client, error) {
	// the host name will look something like this
	// - foo.blob.core.windows.net
	// "foo" is the account name
	// "core.windows.net" is the baseURL

	// find the first dot to get account name
	i1 := strings.IndexByte(u.Host, '.')
	if i1 < 0 {
		return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host)
	}

	// now find the second dot to get the base URL
	i2 := strings.IndexByte(u.Host[i1+1:], '.')
	if i2 < 0 {
		return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host[i1+1:])
	}

	sasToken := u.Query()
	// u.Host[i1+i2+2:] skips past "<account>.<service>." (i2 is relative to
	// the substring after the first dot, hence the +2).
	c := newSASClient(u.Host[:i1], u.Host[i1+i2+2:], sasToken)
	if spr := sasToken.Get("spr"); spr == "" {
		// infer from URL if not in the query params set
		c.useHTTPS = u.Scheme == "https"
	}
	return c, nil
}
// isServiceSASClient reports whether this client was built from a
// service-level SAS (SAS client without an account SAS token).
func (c Client) isServiceSASClient() bool {
	return c.sasClient && c.accountSASToken == nil
}

// isAccountSASClient reports whether this client was built from an
// account-level SAS token.
func (c Client) isAccountSASClient() bool {
	return c.sasClient && c.accountSASToken != nil
}
// getDefaultUserAgent builds the initial User-Agent string from the Go
// runtime, the SDK version and the configured storage API version.
func (c Client) getDefaultUserAgent() string {
	return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s",
		runtime.Version(),
		runtime.GOARCH,
		runtime.GOOS,
		version.Number,
		c.apiVersion,
	)
}
// AddToUserAgent adds an extension to the current user agent
func (c *Client) AddToUserAgent(extension string) error {
	// Reject empty extensions up front; the existing agent is left untouched.
	if extension == "" {
		return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.userAgent)
	}
	c.userAgent = fmt.Sprintf("%s %s", c.userAgent, extension)
	return nil
}
// protectUserAgent is used in funcs that include extraheaders as a parameter.
// It prevents the User-Agent header to be overwritten, instead if it happens to
// be present, it gets added to the current User-Agent. Use it before getStandardHeaders
func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]string {
	if v, ok := extraheaders[userAgentHeader]; ok {
		// Fold the caller's value into the client's agent and drop the header
		// so it cannot clobber the one set by getStandardHeaders.
		c.AddToUserAgent(v)
		delete(extraheaders, userAgentHeader)
	}
	return extraheaders
}
// getBaseURL builds the scheme and host portion of the endpoint URL for the
// given service ("blob", "table", "queue" or "file"). The emulator account
// maps to fixed localhost host:port pairs instead of DNS-based hosts.
func (c Client) getBaseURL(service string) *url.URL {
	scheme := "http"
	if c.useHTTPS {
		scheme = "https"
	}

	var host string
	if c.accountName != StorageEmulatorAccountName {
		host = c.accountName + "." + service + "." + c.baseURL
	} else {
		switch service {
		case blobServiceName:
			host = storageEmulatorBlob
		case tableServiceName:
			host = storageEmulatorTable
		case queueServiceName:
			host = storageEmulatorQueue
		}
	}

	return &url.URL{
		Scheme: scheme,
		Host:   host,
	}
}
// getEndpoint returns the full URL (scheme, host, path and encoded query)
// for a request against the given service.
func (c Client) getEndpoint(service, path string, params url.Values) string {
	base := c.getBaseURL(service)

	// API doesn't accept path segments not starting with '/'
	if !strings.HasPrefix(path, "/") {
		path = "/" + path
	}

	// The emulator addresses all resources under the account name.
	if c.accountName == StorageEmulatorAccountName {
		path = "/" + StorageEmulatorAccountName + path
	}

	base.Path = path
	base.RawQuery = params.Encode()
	return base.String()
}
// AccountSASTokenOptions includes options for constructing
// an account SAS token.
// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
type AccountSASTokenOptions struct {
	APIVersion    string        // storage API version ("sv"); defaults to the client's version
	Services      Services      // which services the token grants access to ("ss")
	ResourceTypes ResourceTypes // which resource levels are accessible ("srt")
	Permissions   Permissions   // granted operations ("sp")
	Start         time.Time     // optional start of validity ("st")
	Expiry        time.Time     // end of validity ("se")
	IP            string        // optional allowed IP or IP range ("sip")
	UseHTTPS      bool          // restrict the token to HTTPS only ("spr")
}

// Services specify services accessible with an account SAS.
type Services struct {
	Blob  bool
	Queue bool
	Table bool
	File  bool
}

// ResourceTypes specify the resources accessible with an
// account SAS.
type ResourceTypes struct {
	Service   bool
	Container bool
	Object    bool
}

// Permissions specifies permissions for an accountSAS.
type Permissions struct {
	Read    bool
	Write   bool
	Delete  bool
	List    bool
	Add     bool
	Create  bool
	Update  bool
	Process bool
}
// GetAccountSASToken creates an account SAS token
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
func (c Client) GetAccountSASToken(options AccountSASTokenOptions) (url.Values, error) {
	if options.APIVersion == "" {
		options.APIVersion = c.apiVersion
	}

	// API versions sort lexicographically (YYYY-MM-DD), so a string compare
	// is a valid version check here.
	if options.APIVersion < "2015-04-05" {
		return url.Values{}, fmt.Errorf("account SAS does not support API versions prior to 2015-04-05. API version : %s", options.APIVersion)
	}

	// build services string
	services := ""
	if options.Services.Blob {
		services += "b"
	}
	if options.Services.Queue {
		services += "q"
	}
	if options.Services.Table {
		services += "t"
	}
	if options.Services.File {
		services += "f"
	}

	// build resources string
	resources := ""
	if options.ResourceTypes.Service {
		resources += "s"
	}
	if options.ResourceTypes.Container {
		resources += "c"
	}
	if options.ResourceTypes.Object {
		resources += "o"
	}

	// build permissions string
	permissions := ""
	if options.Permissions.Read {
		permissions += "r"
	}
	if options.Permissions.Write {
		permissions += "w"
	}
	if options.Permissions.Delete {
		permissions += "d"
	}
	if options.Permissions.List {
		permissions += "l"
	}
	if options.Permissions.Add {
		permissions += "a"
	}
	if options.Permissions.Create {
		permissions += "c"
	}
	if options.Permissions.Update {
		permissions += "u"
	}
	if options.Permissions.Process {
		permissions += "p"
	}

	// build start time, if exists
	start := ""
	if options.Start != (time.Time{}) {
		start = options.Start.UTC().Format(time.RFC3339)
	}

	// build expiry time
	expiry := options.Expiry.UTC().Format(time.RFC3339)

	protocol := "https,http"
	if options.UseHTTPS {
		protocol = "https"
	}

	// The field order of the string-to-sign is mandated by the service and
	// must not change; the trailing empty string yields the required final
	// newline.
	stringToSign := strings.Join([]string{
		c.accountName,
		permissions,
		services,
		resources,
		start,
		expiry,
		options.IP,
		protocol,
		options.APIVersion,
		"",
	}, "\n")
	signature := c.computeHmac256(stringToSign)

	sasParams := url.Values{
		"sv":  {options.APIVersion},
		"ss":  {services},
		"srt": {resources},
		"sp":  {permissions},
		"se":  {expiry},
		"spr": {protocol},
		"sig": {signature},
	}

	// Optional parameters are only added when set.
	if start != "" {
		sasParams.Add("st", start)
	}
	if options.IP != "" {
		sasParams.Add("sip", options.IP)
	}

	return sasParams, nil
}
// GetBlobService returns a BlobStorageClient which can operate on the blob
// service of the storage account.
func (c Client) GetBlobService() BlobStorageClient {
	// Tag the user agent of this client copy so blob requests are identifiable.
	c.AddToUserAgent(blobServiceName)
	svc := BlobStorageClient{client: c}
	svc.auth = sharedKey
	if c.UseSharedKeyLite {
		svc.auth = sharedKeyLite
	}
	return svc
}
// GetQueueService returns a QueueServiceClient which can operate on the queue
// service of the storage account.
func (c Client) GetQueueService() QueueServiceClient {
	// Tag the user agent of this client copy so queue requests are identifiable.
	c.AddToUserAgent(queueServiceName)
	svc := QueueServiceClient{client: c}
	svc.auth = sharedKey
	if c.UseSharedKeyLite {
		svc.auth = sharedKeyLite
	}
	return svc
}
// GetTableService returns a TableServiceClient which can operate on the table
// service of the storage account.
func (c Client) GetTableService() TableServiceClient {
	// Tag the user agent of this client copy so table requests are identifiable.
	c.AddToUserAgent(tableServiceName)
	svc := TableServiceClient{client: c}
	// Tables use their own shared-key signing scheme.
	svc.auth = sharedKeyForTable
	if c.UseSharedKeyLite {
		svc.auth = sharedKeyLiteForTable
	}
	return svc
}
// GetFileService returns a FileServiceClient which can operate on the file
// service of the storage account.
func (c Client) GetFileService() FileServiceClient {
	// Tag the user agent of this client copy so file requests are identifiable.
	c.AddToUserAgent(fileServiceName)
	svc := FileServiceClient{client: c}
	svc.auth = sharedKey
	if c.UseSharedKeyLite {
		svc.auth = sharedKeyLite
	}
	return svc
}
// getStandardHeaders returns the headers sent with every request: the
// client's user agent, API version, and the current RFC1123 timestamp.
func (c Client) getStandardHeaders() map[string]string {
	return map[string]string{
		userAgentHeader: c.userAgent,
		"x-ms-version":  c.apiVersion,
		"x-ms-date":     currentTimeRfc1123Formatted(),
	}
}
// exec signs and performs a request against the storage endpoint, returning
// the raw response. For statuses 400-505 the response is still returned
// (so headers remain inspectable) together with an error decoded from the
// body via getErrorFromResponse.
func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*http.Response, error) {
	headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
	if err != nil {
		return nil, err
	}

	req, err := http.NewRequest(verb, url, body)
	if err != nil {
		return nil, errors.New("azure/storage: error creating request: " + err.Error())
	}

	// http.NewRequest() will automatically set req.ContentLength for a handful of types
	// otherwise we will handle here.
	if req.ContentLength < 1 {
		if clstr, ok := headers["Content-Length"]; ok {
			if cl, err := strconv.ParseInt(clstr, 10, 64); err == nil {
				req.ContentLength = cl
			}
		}
	}

	for k, v := range headers {
		req.Header[k] = append(req.Header[k], v) // Must bypass case munging present in `Add` by using map functions directly. See https://github.com/Azure/azure-sdk-for-go/issues/645
	}

	if c.isAccountSASClient() {
		// append the SAS token to the query params
		v := req.URL.Query()
		v = mergeParams(v, c.accountSASToken)
		req.URL.RawQuery = v.Encode()
	}

	resp, err := c.Sender.Send(&c, req)
	if err != nil {
		return nil, err
	}

	if resp.StatusCode >= 400 && resp.StatusCode <= 505 {
		return resp, getErrorFromResponse(resp)
	}

	return resp, nil
}
// execInternalJSONCommon signs and performs an OData (JSON) request, returning
// the parsed odata response along with the raw request and response so that
// callers such as batch processing can continue working with them.
//
// For error statuses (400-505) a body, if present, is decoded into
// respToRet.odata; a bodiless error (e.g. from a HEAD request) is synthesized
// from the status line and debug headers.
func (c Client) execInternalJSONCommon(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, *http.Request, *http.Response, error) {
	headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
	if err != nil {
		return nil, nil, nil, err
	}

	req, err := http.NewRequest(verb, url, body)
	// Fix: this error was previously ignored, which dereferenced a nil req
	// below whenever the URL was malformed.
	if err != nil {
		return nil, nil, nil, err
	}
	for k, v := range headers {
		req.Header.Add(k, v)
	}

	resp, err := c.Sender.Send(&c, req)
	if err != nil {
		return nil, nil, nil, err
	}

	respToRet := &odataResponse{resp: resp}

	statusCode := resp.StatusCode
	if statusCode >= 400 && statusCode <= 505 {
		var respBody []byte
		respBody, err = readAndCloseBody(resp.Body)
		if err != nil {
			return nil, nil, nil, err
		}

		requestID, date, version := getDebugHeaders(resp.Header)
		if len(respBody) == 0 {
			// no error in response body, might happen in HEAD requests
			err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version)
			return respToRet, req, resp, err
		}
		// try unmarshal as odata.error json
		err = json.Unmarshal(respBody, &respToRet.odata)
	}

	return respToRet, req, resp, err
}
// execInternalJSON performs an OData request, discarding the raw request and
// response that execInternalJSONCommon additionally returns.
func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
	respToRet, _, _, err := c.execInternalJSONCommon(verb, url, headers, body, auth)
	return respToRet, err
}
// execBatchOperationJSON executes a table batch request and unwraps the
// nested multipart response: the outer batch part is located first, then the
// changeset inside it is parsed into the returned odataResponse.
func (c Client) execBatchOperationJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
	// execute common query, get back generated request, response etc... for more processing.
	respToRet, req, resp, err := c.execInternalJSONCommon(verb, url, headers, body, auth)
	if err != nil {
		return nil, err
	}

	// return the OData in the case of executing batch commands.
	// In this case we need to read the outer batch boundary and contents.
	// Then we read the changeset information within the batch
	var respBody []byte
	respBody, err = readAndCloseBody(resp.Body)
	if err != nil {
		return nil, err
	}

	// outer multipart body
	_, batchHeader, err := mime.ParseMediaType(resp.Header["Content-Type"][0])
	if err != nil {
		return nil, err
	}

	// batch details.
	batchBoundary := batchHeader["boundary"]
	batchPartBuf, changesetBoundary, err := genBatchReader(batchBoundary, respBody)
	if err != nil {
		return nil, err
	}

	// changeset details.
	err = genChangesetReader(req, respToRet, batchPartBuf, changesetBoundary)
	if err != nil {
		return nil, err
	}

	return respToRet, nil
}
// genChangesetReader reads the single changeset part nested inside a batch
// part and, unless the changeset reports 204 No Content, decodes its embedded
// HTTP response body as OData JSON into respToRet.
func genChangesetReader(req *http.Request, respToRet *odataResponse, batchPartBuf io.Reader, changesetBoundary string) error {
	changesetMultiReader := multipart.NewReader(batchPartBuf, changesetBoundary)
	changesetPart, err := changesetMultiReader.NextPart()
	if err != nil {
		return err
	}

	// The changeset part embeds a complete HTTP response; parse it in the
	// context of the originating request.
	changesetPartBufioReader := bufio.NewReader(changesetPart)
	changesetResp, err := http.ReadResponse(changesetPartBufioReader, req)
	if err != nil {
		return err
	}

	if changesetResp.StatusCode != http.StatusNoContent {
		changesetBody, err := readAndCloseBody(changesetResp.Body)
		// Fix: this read error was previously discarded (immediately
		// overwritten by the Unmarshal error), silently parsing a possibly
		// truncated body.
		if err != nil {
			return err
		}
		if err := json.Unmarshal(changesetBody, &respToRet.odata); err != nil {
			return err
		}
		respToRet.resp = changesetResp
	}

	return nil
}
func genBatchReader(batchBoundary string, respBody []byte) (io.Reader, string, error) {
respBodyString := string(respBody)
respBodyReader := strings.NewReader(respBodyString)
// reading batchresponse
batchMultiReader := multipart.NewReader(respBodyReader, batchBoundary)
batchPart, err := batchMultiReader.NextPart()
if err != nil {
return nil, "", err
}
batchPartBufioReader := bufio.NewReader(batchPart)
_, changesetHeader, err := mime.ParseMediaType(batchPart.Header.Get("Content-Type"))
if err != nil {
return nil, "", err
}
changesetBoundary := changesetHeader["boundary"]
return batchPartBufioReader, changesetBoundary, nil
}
func readAndCloseBody(body io.ReadCloser) ([]byte, error) {
defer body.Close()
out, err := ioutil.ReadAll(body)
if err == io.EOF {
err = nil
}
return out, err
}
// reads the response body then closes it
func drainRespBody(resp *http.Response) {
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}
// serviceErrFromXML decodes an XML error body from the storage service into
// storageErr. On decode failure the raw body is preserved in the Message
// field and the XML error is returned.
func serviceErrFromXML(body []byte, storageErr *AzureStorageServiceError) error {
	if err := xml.Unmarshal(body, storageErr); err != nil {
		// Fix: corrected typo in the message ("could no be" -> "could not be").
		storageErr.Message = fmt.Sprintf("Response body could not be unmarshaled: %v. Body: %v.", err, string(body))
		return err
	}
	return nil
}
// serviceErrFromJSON decodes an OData JSON error body into storageErr.
// On decode failure the raw body is preserved in storageErr.Message for
// diagnostics and the unmarshal error is returned.
func serviceErrFromJSON(body []byte, storageErr *AzureStorageServiceError) error {
	odataError := odataErrorWrapper{}
	if err := json.Unmarshal(body, &odataError); err != nil {
		// FIX: corrected "could no be" typo in the diagnostic message.
		storageErr.Message = fmt.Sprintf("Response body could not be unmarshaled: %v. Body: %v.", err, string(body))
		return err
	}
	storageErr.Code = odataError.Err.Code
	storageErr.Message = odataError.Err.Message.Value
	storageErr.Lang = odataError.Err.Message.Lang
	return nil
}
// serviceErrFromStatusCode builds a service error for responses that
// carried no body (e.g. HEAD requests), using only the status line and
// the debug headers.
func serviceErrFromStatusCode(code int, status string, requestID, date, version string) AzureStorageServiceError {
	e := AzureStorageServiceError{
		Message:    "no response body was available for error status code",
		StatusCode: code,
		Code:       status,
		RequestID:  requestID,
		Date:       date,
		APIVersion: version,
	}
	return e
}
// Error implements the error interface, flattening every diagnostic
// field the service returned into a single line.
func (e AzureStorageServiceError) Error() string {
	const layout = "storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestInitiated=%s, RequestId=%s, API Version=%s, QueryParameterName=%s, QueryParameterValue=%s"
	return fmt.Sprintf(layout,
		e.StatusCode, e.Code, e.Message, e.Date, e.RequestID, e.APIVersion, e.QueryParameterName, e.QueryParameterValue)
}
// checkRespCode returns UnexpectedStatusError if the given response code is not
// one of the allowed status codes; otherwise nil.
func checkRespCode(resp *http.Response, allowed []int) error {
for _, v := range allowed {
if resp.StatusCode == v {
return nil
}
}
err := getErrorFromResponse(resp)
return UnexpectedStatusCodeError{
allowed: allowed,
got: resp.StatusCode,
inner: err,
}
}
// addMetadataToHeaders copies each user metadata entry into h under the
// user-defined-metadata header prefix. The map is first passed through
// protectUserAgent (defined elsewhere in this file; presumably it
// guards reserved header names — verify there).
func (c Client) addMetadataToHeaders(h map[string]string, metadata map[string]string) map[string]string {
	for key, value := range c.protectUserAgent(metadata) {
		h[userDefinedMetadataHeaderPrefix+key] = value
	}
	return h
}
func getDebugHeaders(h http.Header) (requestID, date, version string) {
requestID = h.Get("x-ms-request-id")
version = h.Get("x-ms-version")
date = h.Get("Date")
return
}
// getErrorFromResponse converts a non-success HTTP response into an
// error value. An empty body (possible for HEAD requests) yields a
// synthetic status-code-only error; otherwise the body is decoded as an
// XML or JSON storage service error depending on Content-Type.
func getErrorFromResponse(resp *http.Response) error {
	respBody, err := readAndCloseBody(resp.Body)
	if err != nil {
		return err
	}
	requestID, date, version := getDebugHeaders(resp.Header)
	if len(respBody) == 0 {
		// no error in response body, might happen in HEAD requests
		return serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version)
	}
	storageErr := AzureStorageServiceError{
		StatusCode: resp.StatusCode,
		RequestID:  requestID,
		Date:       date,
		APIVersion: version,
	}
	// response contains storage service error object, unmarshal
	var umErr error
	if resp.Header.Get("Content-Type") == "application/xml" {
		umErr = serviceErrFromXML(respBody, &storageErr)
	} else {
		umErr = serviceErrFromJSON(respBody, &storageErr)
	}
	if umErr != nil {
		// FIX: the original tested `err` here, which is always nil at this
		// point, so unmarshal failures were silently dropped. Propagate the
		// unmarshal error as the inline comment originally intended.
		return umErr
	}
	return storageErr
}

View File

@@ -1,38 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"net/url"
"time"
)
// SASOptions includes options used by SAS URIs for different
// services and resources.
type SASOptions struct {
	// APIVersion is the storage service version used to sign the URI.
	APIVersion string
	// Start is the moment the signature becomes valid (optional).
	Start time.Time
	// Expiry is the moment the signature stops being valid.
	Expiry time.Time
	// IP restricts the SAS to an IP or IP range — exact format is
	// enforced by the signing helpers elsewhere; verify there.
	IP string
	// UseHTTPS limits the SAS to HTTPS-only requests when set.
	UseHTTPS bool
	// Identifier references a stored access policy — presumably; confirm
	// against the SAS construction code that consumes this struct.
	Identifier string
}
func addQueryParameter(query url.Values, key, value string) url.Values {
if value != "" {
query.Add(key, value)
}
return query
}

View File

@@ -1,640 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
// Container represents an Azure container.
type Container struct {
	// bsc is the owning blob service client; every request goes through
	// bsc.client.
	bsc *BlobStorageClient
	// Name is the container name; "" addresses the $root container in GetURL.
	Name string `xml:"Name"`
	// Properties holds the service-reported container properties.
	Properties ContainerProperties `xml:"Properties"`
	// Metadata holds user-defined metadata, sent as prefixed headers on create.
	Metadata map[string]string
	// sasuri is the signed URI used when the client is a service-SAS
	// client (see Exists and ListBlobs).
	sasuri url.URL
}
// Client returns the HTTP client used by the Container reference.
// The returned pointer aliases the client embedded in the parent
// BlobStorageClient.
func (c *Container) Client() *Client {
	return &c.bsc.client
}
// buildPath returns the URI path component addressing this container.
func (c *Container) buildPath() string {
	return "/" + c.Name
}
// GetURL gets the canonical URL to the container.
// This method does not create a publicly accessible URL if the container
// is private and this method does not check if the blob exists.
func (c *Container) GetURL() string {
	name := c.Name
	if len(name) == 0 {
		// An empty name addresses the account's default root container.
		name = "$root"
	}
	return c.bsc.client.getEndpoint(blobServiceName, pathForResource(name, ""), nil)
}
// ContainerSASOptions are options to construct a container SAS
// URI.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
type ContainerSASOptions struct {
	ContainerSASPermissions // permissions granted by the SAS
	OverrideHeaders         // response headers to override on download
	SASOptions              // validity window, IP restriction, etc.
}
// ContainerSASPermissions includes the available permissions for
// a container SAS URI.
type ContainerSASPermissions struct {
	BlobServiceSASPermissions // shared blob-level permissions
	// List grants permission to list blobs; appended as "l" in GetSASURI.
	List bool
}
// GetSASURI creates an URL to the container which contains the Shared
// Access Signature with the specified options.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
func (c *Container) GetSASURI(options ContainerSASOptions) (string, error) {
	const signedResource = "c" // signed resource type: container
	uri := c.GetURL()
	canonicalizedResource, err := c.bsc.client.buildCanonicalizedResource(uri, c.bsc.auth, true)
	if err != nil {
		return "", err
	}
	// Assemble the permissions string; "l" (list) is container-specific.
	perms := options.BlobServiceSASPermissions.buildString()
	if options.List {
		perms += "l"
	}
	return c.bsc.client.blobAndFileSASURI(options.SASOptions, uri, perms, canonicalizedResource, signedResource, options.OverrideHeaders)
}
// ContainerProperties contains various properties of a container returned from
// various endpoints like ListContainers.
type ContainerProperties struct {
	LastModified string `xml:"Last-Modified"`
	Etag         string `xml:"Etag"`
	// Lease fields mirror the x-ms-lease-* response headers (see GetProperties).
	LeaseStatus   string `xml:"LeaseStatus"`
	LeaseState    string `xml:"LeaseState"`
	LeaseDuration string `xml:"LeaseDuration"`
	// PublicAccess reflects the container's public access level
	// ("blob", "container", or empty for private).
	PublicAccess ContainerAccessType `xml:"PublicAccess"`
}
// ContainerListResponse contains the response fields from
// ListContainers call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
type ContainerListResponse struct {
	XMLName xml.Name `xml:"EnumerationResults"`
	Xmlns   string   `xml:"xmlns,attr"`
	Prefix  string   `xml:"Prefix"`
	Marker  string   `xml:"Marker"`
	// NextMarker is the continuation token for the next page; empty when
	// the listing is complete.
	NextMarker string      `xml:"NextMarker"`
	MaxResults int64       `xml:"MaxResults"`
	Containers []Container `xml:"Containers>Container"`
}
// BlobListResponse contains the response fields from ListBlobs call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
type BlobListResponse struct {
	XMLName xml.Name `xml:"EnumerationResults"`
	Xmlns   string   `xml:"xmlns,attr"`
	Prefix  string   `xml:"Prefix"`
	Marker  string   `xml:"Marker"`
	// NextMarker is the continuation token for the next page; empty when
	// the listing is complete.
	NextMarker string `xml:"NextMarker"`
	MaxResults int64  `xml:"MaxResults"`
	Blobs      []Blob `xml:"Blobs>Blob"`
	// BlobPrefix is used to traverse blobs as if it were a file system.
	// It is returned if ListBlobsParameters.Delimiter is specified.
	// The list here can be thought of as "folders" that may contain
	// other folders or blobs.
	BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"`
	// Delimiter is used to traverse blobs as if it were a file system.
	// It is returned if ListBlobsParameters.Delimiter is specified.
	Delimiter string `xml:"Delimiter"`
}
// IncludeBlobDataset has options to include in a list blobs operation.
// Each flag adds the corresponding dataset name to the "include" query
// parameter (see ListBlobsParameters.getParameters).
type IncludeBlobDataset struct {
	Snapshots        bool
	Metadata         bool
	UncommittedBlobs bool
	Copy             bool
}
// ListBlobsParameters defines the set of customizable
// parameters to make a List Blobs call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
type ListBlobsParameters struct {
	Prefix    string
	Delimiter string
	// Marker is the continuation token from a previous page's NextMarker.
	Marker  string
	Include *IncludeBlobDataset
	// MaxResults and Timeout are omitted from the query when zero.
	MaxResults uint
	Timeout    uint
	// RequestID is sent as x-ms-client-request-id for tracing.
	RequestID string
}
// getParameters serializes the list-blobs options into URL query
// values, emitting only parameters that were explicitly set.
func (p ListBlobsParameters) getParameters() url.Values {
	out := url.Values{}
	setNonEmpty := func(key, val string) {
		if val != "" {
			out.Set(key, val)
		}
	}
	setNonEmpty("prefix", p.Prefix)
	setNonEmpty("delimiter", p.Delimiter)
	setNonEmpty("marker", p.Marker)
	if p.Include != nil {
		// Build the comma-separated dataset list in a fixed order.
		include := []string{}
		include = addString(include, p.Include.Snapshots, "snapshots")
		include = addString(include, p.Include.Metadata, "metadata")
		include = addString(include, p.Include.UncommittedBlobs, "uncommittedblobs")
		include = addString(include, p.Include.Copy, "copy")
		out.Set("include", strings.Join(include, ","))
	}
	if p.MaxResults > 0 {
		out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
	}
	if p.Timeout > 0 {
		out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
	}
	return out
}
// addString appends text to datasets only when include is set; used to
// build the "include" query parameter piece by piece.
func addString(datasets []string, include bool, text string) []string {
	if !include {
		return datasets
	}
	return append(datasets, text)
}
// ContainerAccessType defines the access level to the container from a public
// request.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms-
// blob-public-access" header.
type ContainerAccessType string

// Access options for containers
const (
	// ContainerAccessTypePrivate disables anonymous access (the default).
	ContainerAccessTypePrivate ContainerAccessType = ""
	// ContainerAccessTypeBlob allows anonymous read of blob data only.
	ContainerAccessTypeBlob ContainerAccessType = "blob"
	// ContainerAccessTypeContainer additionally allows listing the container.
	ContainerAccessTypeContainer ContainerAccessType = "container"
)
// ContainerAccessPolicy represents each access policy in the container ACL.
type ContainerAccessPolicy struct {
	// ID identifies the stored access policy.
	ID         string
	StartTime  time.Time
	ExpiryTime time.Time
	// CanRead/CanWrite/CanDelete map to the "r"/"w"/"d" permission
	// characters on the wire (see generateContainerPermissions).
	CanRead   bool
	CanWrite  bool
	CanDelete bool
}
// ContainerPermissions represents the container ACLs.
type ContainerPermissions struct {
	// AccessType is the container's public access level.
	AccessType ContainerAccessType
	// AccessPolicies lists the stored access policies in the ACL.
	AccessPolicies []ContainerAccessPolicy
}
// ContainerAccessHeader references header used when setting/getting container ACL
const (
	ContainerAccessHeader string = "x-ms-blob-public-access"
)
// GetBlobReference returns a Blob object for the specified blob name.
// No request is made; existence of the blob is not checked.
func (c *Container) GetBlobReference(name string) *Blob {
	b := &Blob{Name: name, Container: c}
	return b
}
// CreateContainerOptions includes the options for a create container operation
type CreateContainerOptions struct {
	// Timeout is added to the request query string when non-zero.
	Timeout uint
	// Access sets the container's public access level.
	Access ContainerAccessType `header:"x-ms-blob-public-access"`
	// RequestID is sent for client-side request tracing.
	RequestID string `header:"x-ms-client-request-id"`
}
// Create creates a blob container within the storage account
// with given name and access level. Returns error if container already exists.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Container
func (c *Container) Create(options *CreateContainerOptions) error {
	res, err := c.create(options)
	if err != nil {
		return err
	}
	defer drainRespBody(res)
	// Only 201 Created indicates success here.
	return checkRespCode(res, []int{http.StatusCreated})
}
// CreateIfNotExists creates a blob container if it does not exist. Returns
// true if container is newly created or false if container already exists.
func (c *Container) CreateIfNotExists(options *CreateContainerOptions) (bool, error) {
	res, err := c.create(options)
	if res == nil {
		return false, err
	}
	defer drainRespBody(res)
	switch res.StatusCode {
	case http.StatusCreated:
		return true, nil
	case http.StatusConflict:
		// Conflict means the container already exists: not an error.
		return false, nil
	}
	return false, err
}
// create issues the Put Container request and returns the raw response.
func (c *Container) create(options *CreateContainerOptions) (*http.Response, error) {
	query := url.Values{"restype": {"container"}}
	headers := c.bsc.client.addMetadataToHeaders(c.bsc.client.getStandardHeaders(), c.Metadata)
	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	endpoint := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
	return c.bsc.client.exec(http.MethodPut, endpoint, headers, nil, c.bsc.auth)
}
// Exists returns true if a container with given name exists
// on the storage account, otherwise returns false.
func (c *Container) Exists() (bool, error) {
	query := url.Values{"restype": {"container"}}
	var uri string
	if c.bsc.client.isServiceSASClient() {
		// For service-SAS clients, keep the signed query from the SAS URI.
		u := c.sasuri
		u.RawQuery = mergeParams(query, c.sasuri.Query()).Encode()
		uri = u.String()
	} else {
		uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
	}
	res, err := c.bsc.client.exec(http.MethodHead, uri, c.bsc.client.getStandardHeaders(), nil, c.bsc.auth)
	if res == nil {
		return false, err
	}
	defer drainRespBody(res)
	switch res.StatusCode {
	case http.StatusOK:
		return true, nil
	case http.StatusNotFound:
		return false, nil
	}
	return false, err
}
// SetContainerPermissionOptions includes options for a set container permissions operation
type SetContainerPermissionOptions struct {
	Timeout uint
	// LeaseID is required when the container has an active lease.
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
// SetPermissions sets up container permissions
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Container-ACL
func (c *Container) SetPermissions(permissions ContainerPermissions, options *SetContainerPermissionOptions) error {
	// Marshal the ACL document first so its length can be sent.
	body, length, err := generateContainerACLpayload(permissions.AccessPolicies)
	if err != nil {
		return err
	}
	query := url.Values{"restype": {"container"}, "comp": {"acl"}}
	headers := c.bsc.client.getStandardHeaders()
	headers = addToHeaders(headers, ContainerAccessHeader, string(permissions.AccessType))
	headers["Content-Length"] = strconv.Itoa(length)
	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	endpoint := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
	res, err := c.bsc.client.exec(http.MethodPut, endpoint, headers, body, c.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(res)
	return checkRespCode(res, []int{http.StatusOK})
}
// GetContainerPermissionOptions includes options for a get container permissions operation
type GetContainerPermissionOptions struct {
	Timeout uint
	// LeaseID is only sent to Azure when populated.
	LeaseID   string `header:"x-ms-lease-id"`
	RequestID string `header:"x-ms-client-request-id"`
}
// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx
// If timeout is 0 then it will not be passed to Azure
// leaseID will only be passed to Azure if populated
func (c *Container) GetPermissions(options *GetContainerPermissionOptions) (*ContainerPermissions, error) {
	query := url.Values{"restype": {"container"}, "comp": {"acl"}}
	headers := c.bsc.client.getStandardHeaders()
	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	endpoint := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
	res, err := c.bsc.client.exec(http.MethodGet, endpoint, headers, nil, c.bsc.auth)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	// Decode the ACL XML, then combine with the public-access header.
	var ap AccessPolicy
	if err = xmlUnmarshal(res.Body, &ap.SignedIdentifiersList); err != nil {
		return nil, err
	}
	return buildAccessPolicy(ap, &res.Header), nil
}
// buildAccessPolicy converts the XML access-policy document plus the
// public-access response header into the exported ContainerPermissions
// form.
func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions {
	out := &ContainerPermissions{
		// containerAccess: "blob", "container", or empty (private).
		AccessType:     ContainerAccessType(headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader))),
		AccessPolicies: []ContainerAccessPolicy{},
	}
	for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
		out.AccessPolicies = append(out.AccessPolicies, ContainerAccessPolicy{
			ID:         policy.ID,
			StartTime:  policy.AccessPolicy.StartTime,
			ExpiryTime: policy.AccessPolicy.ExpiryTime,
			CanRead:    updatePermissions(policy.AccessPolicy.Permission, "r"),
			CanWrite:   updatePermissions(policy.AccessPolicy.Permission, "w"),
			CanDelete:  updatePermissions(policy.AccessPolicy.Permission, "d"),
		})
	}
	return out
}
// DeleteContainerOptions includes options for a delete container operation
type DeleteContainerOptions struct {
	Timeout uint
	// LeaseID is required when the container has an active lease.
	LeaseID           string     `header:"x-ms-lease-id"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
// Delete deletes the container with given name on the storage
// account. If the container does not exist returns error.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container
func (c *Container) Delete(options *DeleteContainerOptions) error {
	res, err := c.delete(options)
	if err != nil {
		return err
	}
	defer drainRespBody(res)
	// The service acknowledges deletion with 202 Accepted.
	return checkRespCode(res, []int{http.StatusAccepted})
}
// DeleteIfExists deletes the container with given name on the storage
// account if it exists. Returns true if container is deleted with this call, or
// false if the container did not exist at the time of the Delete Container
// operation.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container
func (c *Container) DeleteIfExists(options *DeleteContainerOptions) (bool, error) {
	res, err := c.delete(options)
	if res == nil {
		return false, err
	}
	defer drainRespBody(res)
	switch res.StatusCode {
	case http.StatusAccepted:
		return true, nil
	case http.StatusNotFound:
		// Already gone: not an error for this variant.
		return false, nil
	}
	return false, err
}
// delete issues the Delete Container request and returns the raw response.
func (c *Container) delete(options *DeleteContainerOptions) (*http.Response, error) {
	query := url.Values{"restype": {"container"}}
	headers := c.bsc.client.getStandardHeaders()
	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	endpoint := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
	return c.bsc.client.exec(http.MethodDelete, endpoint, headers, nil, c.bsc.auth)
}
// ListBlobs returns an object that contains list of blobs in the container,
// pagination token and other information in the response of List Blobs call.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Blobs
func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) {
	query := mergeParams(params.getParameters(), url.Values{
		"restype": {"container"},
		"comp":    {"list"},
	})
	var uri string
	if c.bsc.client.isServiceSASClient() {
		// For service-SAS clients, keep the signed query from the SAS URI.
		u := c.sasuri
		u.RawQuery = mergeParams(query, c.sasuri.Query()).Encode()
		uri = u.String()
	} else {
		uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
	}
	headers := addToHeaders(c.bsc.client.getStandardHeaders(), "x-ms-client-request-id", params.RequestID)
	var out BlobListResponse
	res, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
	if err != nil {
		return out, err
	}
	defer res.Body.Close()
	// Keep the unmarshal error but still wire up the parent container
	// pointers so partially decoded results remain usable.
	err = xmlUnmarshal(res.Body, &out)
	for i := range out.Blobs {
		out.Blobs[i].Container = c
	}
	return out, err
}
// ContainerMetadataOptions includes options for container metadata operations
type ContainerMetadataOptions struct {
	Timeout uint
	// LeaseID is required when the container has an active lease.
	LeaseID   string `header:"x-ms-lease-id"`
	RequestID string `header:"x-ms-client-request-id"`
}
// SetMetadata replaces the metadata for the specified container.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetBlobMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-metadata
func (c *Container) SetMetadata(options *ContainerMetadataOptions) error {
	query := url.Values{"comp": {"metadata"}, "restype": {"container"}}
	headers := c.bsc.client.addMetadataToHeaders(c.bsc.client.getStandardHeaders(), c.Metadata)
	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	endpoint := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
	res, err := c.bsc.client.exec(http.MethodPut, endpoint, headers, nil, c.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(res)
	return checkRespCode(res, []int{http.StatusOK})
}
// GetMetadata returns all user-defined metadata for the specified container.
//
// All metadata keys will be returned in lower case. (HTTP header
// names are case-insensitive.)
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-metadata
func (c *Container) GetMetadata(options *ContainerMetadataOptions) error {
	query := url.Values{"comp": {"metadata"}, "restype": {"container"}}
	headers := c.bsc.client.getStandardHeaders()
	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	endpoint := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
	res, err := c.bsc.client.exec(http.MethodGet, endpoint, headers, nil, c.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(res)
	if err := checkRespCode(res, []int{http.StatusOK}); err != nil {
		return err
	}
	// Refresh c.Metadata from the response headers.
	c.writeMetadata(res.Header)
	return nil
}
// writeMetadata refreshes c.Metadata from the response headers h, using
// the package-level writeMetadata helper (defined elsewhere in this
// file; presumably it extracts the x-ms-meta-* headers — verify there).
func (c *Container) writeMetadata(h http.Header) {
	c.Metadata = writeMetadata(h)
}
// generateContainerACLpayload marshals the access policies into the
// SignedIdentifiers XML document expected by Set Container ACL,
// returning the payload reader and its byte length.
func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) {
	sil := SignedIdentifiers{SignedIdentifiers: []SignedIdentifier{}}
	for i := range policies {
		p := &policies[i]
		sil.SignedIdentifiers = append(sil.SignedIdentifiers,
			convertAccessPolicyToXMLStructs(p.ID, p.StartTime, p.ExpiryTime, p.generateContainerPermissions()))
	}
	return xmlMarshal(sil)
}
// generateContainerPermissions flattens the boolean permission flags
// into the "rwd" string form used on the wire, keeping the end user API
// as bool flags.
func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) {
	flags := []struct {
		on   bool
		code string
	}{
		{capd.CanRead, "r"},
		{capd.CanWrite, "w"},
		{capd.CanDelete, "d"},
	}
	for _, f := range flags {
		if f.on {
			permissions += f.code
		}
	}
	return permissions
}
// GetProperties updates the properties of the container from the service.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-properties
func (c *Container) GetProperties() error {
	query := url.Values{"restype": {"container"}}
	endpoint := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
	res, err := c.bsc.client.exec(http.MethodGet, endpoint, c.bsc.client.getStandardHeaders(), nil, c.bsc.auth)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if err := checkRespCode(res, []int{http.StatusOK}); err != nil {
		return err
	}
	// Refresh the cached properties from the response headers.
	h := res.Header
	c.Properties = ContainerProperties{
		Etag:          h.Get(headerEtag),
		LeaseStatus:   h.Get("x-ms-lease-status"),
		LeaseState:    h.Get("x-ms-lease-state"),
		LeaseDuration: h.Get("x-ms-lease-duration"),
		LastModified:  h.Get("Last-Modified"),
		PublicAccess:  ContainerAccessType(h.Get(ContainerAccessHeader)),
	}
	return nil
}

View File

@@ -1,237 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"time"
)
// Blob copy states as reported by the service in the CopyStatus
// property (checked by WaitForCopy).
const (
	blobCopyStatusPending = "pending"
	blobCopyStatusSuccess = "success"
	blobCopyStatusAborted = "aborted"
	blobCopyStatusFailed  = "failed"
)
// CopyOptions includes the options for a copy blob operation
type CopyOptions struct {
	Timeout uint
	// Source holds conditions applied to the source blob (x-ms-source-* headers).
	Source CopyOptionsConditions
	// Destiny holds conditions applied to the destination blob
	// (sic — "Destination"; the name is kept for API compatibility).
	Destiny   CopyOptionsConditions
	RequestID string
}
// IncrementalCopyOptions includes the options for an incremental copy blob operation
type IncrementalCopyOptions struct {
	Timeout uint
	// Destination holds conditions applied to the destination blob.
	Destination IncrementalCopyOptionsConditions
	RequestID   string
}
// CopyOptionsConditions includes some conditional options in a copy blob operation
type CopyOptionsConditions struct {
	// LeaseID is required when the targeted blob has an active lease.
	LeaseID           string
	IfModifiedSince   *time.Time
	IfUnmodifiedSince *time.Time
	IfMatch           string
	IfNoneMatch       string
}
// IncrementalCopyOptionsConditions includes some conditional options in a copy blob operation
type IncrementalCopyOptionsConditions struct {
	IfModifiedSince   *time.Time
	IfUnmodifiedSince *time.Time
	IfMatch           string
	IfNoneMatch       string
}
// Copy starts a blob copy operation and waits for the operation to
// complete. sourceBlob parameter must be a canonical URL to the blob (can be
// obtained using the GetURL method.) There is no SLA on blob copy and therefore
// this helper method works faster on smaller files.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
func (b *Blob) Copy(sourceBlob string, options *CopyOptions) error {
	id, err := b.StartCopy(sourceBlob, options)
	if err != nil {
		return err
	}
	return b.WaitForCopy(id)
}
// StartCopy starts a blob copy operation.
// sourceBlob parameter must be a canonical URL to the blob (can be
// obtained using the GetURL method.)
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) {
	query := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers["x-ms-copy-source"] = sourceBlob
	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
		// Conditions on the source blob.
		src := options.Source
		headers = addToHeaders(headers, "x-ms-source-lease-id", src.LeaseID)
		headers = addTimeToHeaders(headers, "x-ms-source-if-modified-since", src.IfModifiedSince)
		headers = addTimeToHeaders(headers, "x-ms-source-if-unmodified-since", src.IfUnmodifiedSince)
		headers = addToHeaders(headers, "x-ms-source-if-match", src.IfMatch)
		headers = addToHeaders(headers, "x-ms-source-if-none-match", src.IfNoneMatch)
		// Conditions on the destination blob.
		dst := options.Destiny
		headers = addToHeaders(headers, "x-ms-lease-id", dst.LeaseID)
		headers = addTimeToHeaders(headers, "x-ms-if-modified-since", dst.IfModifiedSince)
		headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", dst.IfUnmodifiedSince)
		headers = addToHeaders(headers, "x-ms-if-match", dst.IfMatch)
		headers = addToHeaders(headers, "x-ms-if-none-match", dst.IfNoneMatch)
	}
	endpoint := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
	res, err := b.Container.bsc.client.exec(http.MethodPut, endpoint, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return "", err
	}
	defer drainRespBody(res)
	if err := checkRespCode(res, []int{http.StatusAccepted, http.StatusCreated}); err != nil {
		return "", err
	}
	id := res.Header.Get("x-ms-copy-id")
	if id == "" {
		return "", errors.New("Got empty copy id header")
	}
	return id, nil
}
// AbortCopyOptions includes the options for an abort blob operation
type AbortCopyOptions struct {
	Timeout uint
	// LeaseID is required when the destination blob has an active lease.
	LeaseID   string `header:"x-ms-lease-id"`
	RequestID string `header:"x-ms-client-request-id"`
}
// AbortCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function.
// copyID is generated from StartBlobCopy function.
// currentLeaseID is required IF the destination blob has an active lease on it.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Abort-Copy-Blob
func (b *Blob) AbortCopy(copyID string, options *AbortCopyOptions) error {
	query := url.Values{"comp": {"copy"}, "copyid": {copyID}}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers["x-ms-copy-action"] = "abort"
	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	endpoint := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)
	res, err := b.Container.bsc.client.exec(http.MethodPut, endpoint, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(res)
	return checkRespCode(res, []int{http.StatusNoContent})
}
// WaitForCopy loops until a BlobCopy operation is completed (or fails
// with error), polling the blob's properties for the copy status.
func (b *Blob) WaitForCopy(copyID string) error {
	for {
		err := b.GetProperties(nil)
		if err != nil {
			return err
		}
		if b.Properties.CopyID != copyID {
			return errBlobCopyIDMismatch
		}
		switch b.Properties.CopyStatus {
		case blobCopyStatusSuccess:
			return nil
		case blobCopyStatusPending:
			// FIX: the original re-polled immediately with no delay,
			// busy-spinning against the service while a copy was pending.
			// Back off briefly between polls.
			time.Sleep(time.Second)
			continue
		case blobCopyStatusAborted:
			return errBlobCopyAborted
		case blobCopyStatusFailed:
			return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", b.Properties.CopyID, b.Properties.CopyStatusDescription)
		default:
			return fmt.Errorf("storage: unhandled blob copy status: '%s'", b.Properties.CopyStatus)
		}
	}
}
// IncrementalCopyBlob copies a snapshot of a source blob and copies to referring blob
// sourceBlob parameter must be a valid snapshot URL of the original blob.
// THe original blob mut be public, or use a Shared Access Signature.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob .
func (b *Blob) IncrementalCopyBlob(sourceBlobURL string, snapshotTime time.Time, options *IncrementalCopyOptions) (string, error) {
	params := url.Values{"comp": {"incrementalcopy"}}

	// need formatting to 7 decimal places so it's friendly to Windows and *nix
	snapshotTimeFormatted := snapshotTime.Format("2006-01-02T15:04:05.0000000Z")
	u, err := url.Parse(sourceBlobURL)
	if err != nil {
		return "", err
	}
	query := u.Query()
	query.Add("snapshot", snapshotTimeFormatted)
	encodedQuery := query.Encode()
	// The service expects literal colons in the snapshot timestamp.
	encodedQuery = strings.Replace(encodedQuery, "%3A", ":", -1)
	u.RawQuery = encodedQuery
	snapshotURL := u.String()

	headers := b.Container.bsc.client.getStandardHeaders()
	headers["x-ms-copy-source"] = snapshotURL

	if options != nil {
		// FIX: the original discarded addTimeout's return value,
		// inconsistent with every other caller (e.g. StartCopy).
		params = addTimeout(params, options.Timeout)
		headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
		headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destination.IfModifiedSince)
		headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destination.IfUnmodifiedSince)
		headers = addToHeaders(headers, "x-ms-if-match", options.Destination.IfMatch)
		headers = addToHeaders(headers, "x-ms-if-none-match", options.Destination.IfNoneMatch)
	}

	// get URI of destination blob
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return "", err
	}
	defer drainRespBody(resp)

	if err := checkRespCode(resp, []int{http.StatusAccepted}); err != nil {
		return "", err
	}

	copyID := resp.Header.Get("x-ms-copy-id")
	if copyID == "" {
		return "", errors.New("Got empty copy id header")
	}
	return copyID, nil
}

View File

@@ -1,238 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"net/http"
"net/url"
"sync"
)
// Directory represents a directory on a share.
type Directory struct {
	fsc        *FileServiceClient // client used to issue all requests for this directory
	Metadata   map[string]string  // user-defined metadata key/value pairs
	Name       string             `xml:"Name"`
	parent     *Directory // nil for the share's root directory (see Create)
	Properties DirectoryProperties
	share      *Share // share this directory belongs to
}

// DirectoryProperties contains various properties of a directory.
type DirectoryProperties struct {
	LastModified string `xml:"Last-Modified"`
	Etag         string `xml:"Etag"`
}

// ListDirsAndFilesParameters defines the set of customizable parameters to
// make a List Files and Directories call.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
type ListDirsAndFilesParameters struct {
	Prefix     string // filter results to names beginning with this prefix
	Marker     string // continuation token from a previous listing
	MaxResults uint   // maximum number of entries to return per page
	Timeout    uint   // server timeout in seconds
}

// DirsAndFilesListResponse contains the response fields from
// a List Files and Directories call.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
type DirsAndFilesListResponse struct {
	XMLName     xml.Name    `xml:"EnumerationResults"`
	Xmlns       string      `xml:"xmlns,attr"`
	Marker      string      `xml:"Marker"`
	MaxResults  int64       `xml:"MaxResults"`
	Directories []Directory `xml:"Entries>Directory"`
	Files       []File      `xml:"Entries>File"`
	NextMarker  string      `xml:"NextMarker"` // pass as Marker to fetch the next page
}
// buildPath assembles the full path of this directory by walking up the
// parent chain and prepending each ancestor's name to the share path.
func (d *Directory) buildPath() string {
	segments := ""
	for dir := d; dir.Name != ""; dir = dir.parent {
		segments = "/" + dir.Name + segments
	}
	return d.share.buildPath() + segments
}
// Create this directory in the associated share.
// If a directory with the same name already exists, the operation fails.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
func (d *Directory) Create(options *FileRequestOptions) error {
	if d.parent == nil {
		// the root directory always exists; nothing to create
		return nil
	}

	headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, prepareOptions(options), mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated})
	if err == nil {
		d.updateEtagAndLastModified(headers)
	}
	return err
}
// CreateIfNotExists creates this directory under the associated share if the
// directory does not exist. Returns true if the directory is newly created or
// false if the directory already exists.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
func (d *Directory) CreateIfNotExists(options *FileRequestOptions) (bool, error) {
	if d.parent == nil {
		// the root directory always exists
		return false, nil
	}

	resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, prepareOptions(options), nil)
	if resp == nil {
		return false, err
	}
	defer drainRespBody(resp)

	switch resp.StatusCode {
	case http.StatusCreated:
		d.updateEtagAndLastModified(resp.Header)
		return true, nil
	case http.StatusConflict:
		// already present: refresh etag/metadata instead of creating
		return false, d.FetchAttributes(nil)
	default:
		return false, err
	}
}
// Delete removes this directory. It must be empty in order to be deleted.
// If the directory does not exist the operation fails.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
func (d *Directory) Delete(options *FileRequestOptions) error {
	return d.fsc.deleteResource(d.buildPath(), resourceDirectory, options)
}
// DeleteIfExists removes this directory if it exists. The boolean result
// reports whether a directory was actually deleted.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
func (d *Directory) DeleteIfExists(options *FileRequestOptions) (bool, error) {
	resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory, options)
	if resp == nil {
		return false, err
	}
	defer drainRespBody(resp)

	switch resp.StatusCode {
	case http.StatusAccepted:
		return true, nil
	case http.StatusNotFound:
		// already absent: not an error for this call
		return false, nil
	}
	return false, err
}
// Exists returns true if this directory exists.
// On a positive answer the cached Etag/Last-Modified are refreshed.
func (d *Directory) Exists() (bool, error) {
	exists, headers, err := d.fsc.resourceExists(d.buildPath(), resourceDirectory)
	if !exists {
		return false, err
	}
	d.updateEtagAndLastModified(headers)
	return true, err
}
// FetchAttributes retrieves metadata for this directory and refreshes its
// cached Etag/Last-Modified.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-directory-properties
func (d *Directory) FetchAttributes(options *FileRequestOptions) error {
	headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, prepareOptions(options), http.MethodHead)
	if err != nil {
		return err
	}

	d.updateEtagAndLastModified(headers)
	d.Metadata = getMetadataFromHeaders(headers)
	return nil
}
// GetDirectoryReference returns a child Directory object for this directory.
// No request is made; the returned object is a local reference only.
func (d *Directory) GetDirectoryReference(name string) *Directory {
	return &Directory{
		fsc:    d.fsc,
		Name:   name,
		parent: d,
		share:  d.share,
	}
}

// GetFileReference returns a child File object for this directory.
// No request is made; the returned object is a local reference only.
func (d *Directory) GetFileReference(name string) *File {
	return &File{
		fsc:    d.fsc,
		Name:   name,
		parent: d,
		share:  d.share,
		mutex:  &sync.Mutex{},
	}
}
// ListDirsAndFiles returns a list of files and directories under this directory.
// It also contains a pagination token and other response details.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) {
	query := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory))

	resp, err := d.fsc.listContent(d.buildPath(), query, nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	out := new(DirsAndFilesListResponse)
	err = xmlUnmarshal(resp.Body, out)
	return out, err
}
// SetMetadata replaces the metadata for this directory.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetDirectoryMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Directory-Metadata
func (d *Directory) SetMetadata(options *FileRequestOptions) error {
	headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil), options)
	if err != nil {
		return err
	}
	// keep the cached Etag/Last-Modified in sync with the service
	d.updateEtagAndLastModified(headers)
	return nil
}
// updates Etag and last modified date from the service response headers
func (d *Directory) updateEtagAndLastModified(headers http.Header) {
	d.Properties.Etag = headers.Get("Etag")
	d.Properties.LastModified = headers.Get("Last-Modified")
}

// URL gets the canonical URL to this directory.
// This method does not create a publicly accessible URL if the directory
// is private and this method does not check if the directory exists.
func (d *Directory) URL() string {
	return d.fsc.client.getEndpoint(fileServiceName, d.buildPath(), url.Values{})
}

View File

@@ -1,466 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
uuid "github.com/satori/go.uuid"
)
// Annotating as secure for gas scanning
/* #nosec */
const (
	partitionKeyNode  = "PartitionKey" // JSON property holding the partition key
	rowKeyNode        = "RowKey"       // JSON property holding the row key
	etagErrorTemplate = "Etag didn't match: %v"
)

var (
	errEmptyPayload      = errors.New("Empty payload is not a valid metadata level for this operation")
	errNilPreviousResult = errors.New("The previous results page is nil")
	errNilNextLink       = errors.New("There are no more pages in this query results")
)

// Entity represents an entity inside an Azure table.
type Entity struct {
	Table         *Table // table this entity belongs to
	PartitionKey  string
	RowKey        string
	TimeStamp     time.Time
	OdataMetadata string
	OdataType     string
	OdataID       string
	OdataEtag     string // used for optimistic concurrency (If-Match)
	OdataEditLink string
	Properties    map[string]interface{} // user-defined properties
}

// GetEntityReference returns an Entity object with the specified
// partition key and row key. No request is made.
func (t *Table) GetEntityReference(partitionKey, rowKey string) *Entity {
	return &Entity{
		PartitionKey: partitionKey,
		RowKey:       rowKey,
		Table:        t,
	}
}

// EntityOptions includes options for entity operations.
type EntityOptions struct {
	Timeout   uint // server timeout in seconds
	RequestID string `header:"x-ms-client-request-id"`
}

// GetEntityOptions includes options for a get entity operation
type GetEntityOptions struct {
	Select    []string // restrict returned properties to these names ($select)
	RequestID string `header:"x-ms-client-request-id"`
}
// Get gets the referenced entity. Which properties to get can be
// specified using the select option.
// ml selects the OData metadata detail of the response; EmptyPayload is
// rejected because the response must carry entity data.
// See:
// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities
func (e *Entity) Get(timeout uint, ml MetadataLevel, options *GetEntityOptions) error {
	if ml == EmptyPayload {
		return errEmptyPayload
	}
	// RowKey and PartitionKey could be lost if not included in the query
	// As those are the entity identifiers, it is best if they are not lost
	rk := e.RowKey
	pk := e.PartitionKey

	query := url.Values{
		"timeout": {strconv.FormatUint(uint64(timeout), 10)},
	}
	headers := e.Table.tsc.client.getStandardHeaders()
	headers[headerAccept] = string(ml)

	if options != nil {
		if len(options.Select) > 0 {
			// project only the requested properties
			query.Add("$select", strings.Join(options.Select, ","))
		}
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
	resp, err := e.Table.tsc.client.exec(http.MethodGet, uri, headers, nil, e.Table.tsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return err
	}

	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	err = json.Unmarshal(respBody, e)
	if err != nil {
		return err
	}
	// restore the identifiers saved above in case the projection dropped them
	e.PartitionKey = pk
	e.RowKey = rk

	return nil
}
// Insert inserts the referenced entity in its table.
// The function fails if there is an entity with the same
// PartitionKey and RowKey in the table.
// ml determines the level of detail of metadata in the operation response,
// or no data at all.
// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-entity
func (e *Entity) Insert(ml MetadataLevel, options *EntityOptions) error {
	query, headers := options.getParameters()
	headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())

	body, err := json.Marshal(e)
	if err != nil {
		return err
	}
	headers = addBodyRelatedHeaders(headers, len(body))
	headers = addReturnContentHeaders(headers, ml)

	uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.Table.buildPath(), query)
	resp, err := e.Table.tsc.client.exec(http.MethodPost, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	if ml != EmptyPayload {
		// the service echoes the entity back with 201; refresh local state
		if err = checkRespCode(resp, []int{http.StatusCreated}); err != nil {
			return err
		}
		data, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return err
		}
		if err = e.UnmarshalJSON(data); err != nil {
			return err
		}
	} else {
		// no content requested: the service answers 204 with an empty body
		if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
			return err
		}
	}

	return nil
}
// Update updates the contents of an entity. The function fails if there is no entity
// with the same PartitionKey and RowKey in the table or if the ETag is different
// than the one in Azure.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/update-entity2
func (e *Entity) Update(force bool, options *EntityOptions) error {
	// PUT replaces the stored entity wholesale
	return e.updateMerge(force, http.MethodPut, options)
}

// Merge merges the contents of entity specified with PartitionKey and RowKey
// with the content specified in Properties.
// The function fails if there is no entity with the same PartitionKey and
// RowKey in the table or if the ETag is different than the one in Azure.
// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/merge-entity
func (e *Entity) Merge(force bool, options *EntityOptions) error {
	// the Table service uses the non-standard MERGE verb for partial updates
	return e.updateMerge(force, "MERGE", options)
}
// Delete deletes the entity.
// The function fails if there is no entity with the same PartitionKey and
// RowKey in the table or if the ETag is different than the one in Azure.
// When force is true the delete is unconditional (If-Match: *).
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-entity1
func (e *Entity) Delete(force bool, options *EntityOptions) error {
	query, headers := options.getParameters()
	headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())

	headers = addIfMatchHeader(headers, force, e.OdataEtag)
	headers = addReturnContentHeaders(headers, EmptyPayload)

	uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
	resp, err := e.Table.tsc.client.exec(http.MethodDelete, uri, headers, nil, e.Table.tsc.auth)
	if err != nil {
		// resp may be nil when exec fails before a response is received;
		// guard before dereferencing to avoid a nil-pointer panic.
		if resp != nil && resp.StatusCode == http.StatusPreconditionFailed {
			return fmt.Errorf(etagErrorTemplate, err)
		}
		return err
	}
	defer drainRespBody(resp)

	if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
		return err
	}

	return e.updateTimestamp(resp.Header)
}
// InsertOrReplace inserts an entity or replaces the existing one.
// Read more: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-or-replace-entity
func (e *Entity) InsertOrReplace(options *EntityOptions) error {
	// PUT replaces the stored entity wholesale
	return e.insertOr(http.MethodPut, options)
}

// InsertOrMerge inserts an entity or merges the existing one.
// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/insert-or-merge-entity
func (e *Entity) InsertOrMerge(options *EntityOptions) error {
	// the Table service uses the non-standard MERGE verb for partial updates
	return e.insertOr("MERGE", options)
}

// buildPath returns the table-relative address of this entity, e.g.
// "tableName(PartitionKey='pk', RowKey='rk')".
func (e *Entity) buildPath() string {
	return fmt.Sprintf("%s(PartitionKey='%s', RowKey='%s')", e.Table.buildPath(), e.PartitionKey, e.RowKey)
}
// MarshalJSON is a custom marshaller for entity.
// It flattens PartitionKey, RowKey and Properties into one JSON object and
// emits a companion "<name>@odata.type" annotation for property types the
// Table service cannot infer ([]byte, time.Time, uuid.UUID, int64, floats).
func (e *Entity) MarshalJSON() ([]byte, error) {
	completeMap := map[string]interface{}{}
	completeMap[partitionKeyNode] = e.PartitionKey
	completeMap[rowKeyNode] = e.RowKey
	for k, v := range e.Properties {
		typeKey := strings.Join([]string{k, OdataTypeSuffix}, "")
		switch t := v.(type) {
		case []byte:
			completeMap[typeKey] = OdataBinary
			completeMap[k] = t
		case time.Time:
			completeMap[typeKey] = OdataDateTime
			completeMap[k] = t.Format(time.RFC3339Nano)
		case uuid.UUID:
			completeMap[typeKey] = OdataGUID
			completeMap[k] = t.String()
		case int64:
			completeMap[typeKey] = OdataInt64
			completeMap[k] = fmt.Sprintf("%v", v)
		case float32, float64:
			completeMap[typeKey] = OdataDouble
			completeMap[k] = fmt.Sprintf("%v", v)
		default:
			completeMap[k] = v
		}
		if strings.HasSuffix(k, OdataTypeSuffix) {
			// the caller supplied an explicit @odata.type annotation:
			// validate its value and make sure the annotated property exists
			if !(completeMap[k] == OdataBinary ||
				completeMap[k] == OdataDateTime ||
				completeMap[k] == OdataGUID ||
				completeMap[k] == OdataInt64 ||
				completeMap[k] == OdataDouble) {
				return nil, fmt.Errorf("Odata.type annotation %v value is not valid", k)
			}
			valueKey := strings.TrimSuffix(k, OdataTypeSuffix)
			if _, ok := completeMap[valueKey]; !ok {
				return nil, fmt.Errorf("Odata.type annotation %v defined without value defined", k)
			}
		}
	}
	return json.Marshal(completeMap)
}
// UnmarshalJSON is a custom unmarshaller for entities.
// It splits the flat JSON object back into OData metadata, the entity
// identifiers, the Timestamp and the user-defined Properties, converting
// annotated values ("<name>@odata.type") to concrete Go types.
func (e *Entity) UnmarshalJSON(data []byte) error {
	errorTemplate := "Deserializing error: %v"

	props := map[string]interface{}{}
	err := json.Unmarshal(data, &props)
	if err != nil {
		return err
	}

	// deserialize metadata (each lookup also removes the key from props)
	e.OdataMetadata = stringFromMap(props, "odata.metadata")
	e.OdataType = stringFromMap(props, "odata.type")
	e.OdataID = stringFromMap(props, "odata.id")
	e.OdataEtag = stringFromMap(props, "odata.etag")
	e.OdataEditLink = stringFromMap(props, "odata.editLink")
	e.PartitionKey = stringFromMap(props, partitionKeyNode)
	e.RowKey = stringFromMap(props, rowKeyNode)

	// deserialize timestamp
	timeStamp, ok := props["Timestamp"]
	if ok {
		str, ok := timeStamp.(string)
		if !ok {
			return fmt.Errorf(errorTemplate, "Timestamp casting error")
		}
		t, err := time.Parse(time.RFC3339Nano, str)
		if err != nil {
			return fmt.Errorf(errorTemplate, err)
		}
		e.TimeStamp = t
	}
	delete(props, "Timestamp")
	delete(props, "Timestamp@odata.type")

	// deserialize entity (user defined fields)
	for k, v := range props {
		if strings.HasSuffix(k, OdataTypeSuffix) {
			// type annotation found: convert the companion value to the
			// concrete Go type it announces, then drop the annotation key
			valueKey := strings.TrimSuffix(k, OdataTypeSuffix)
			str, ok := props[valueKey].(string)
			if !ok {
				return fmt.Errorf(errorTemplate, fmt.Sprintf("%v casting error", v))
			}
			switch v {
			case OdataBinary:
				props[valueKey], err = base64.StdEncoding.DecodeString(str)
				if err != nil {
					return fmt.Errorf(errorTemplate, err)
				}
			case OdataDateTime:
				t, err := time.Parse("2006-01-02T15:04:05Z", str)
				if err != nil {
					return fmt.Errorf(errorTemplate, err)
				}
				props[valueKey] = t
			case OdataGUID:
				props[valueKey] = uuid.FromStringOrNil(str)
			case OdataInt64:
				i, err := strconv.ParseInt(str, 10, 64)
				if err != nil {
					return fmt.Errorf(errorTemplate, err)
				}
				props[valueKey] = i
			case OdataDouble:
				f, err := strconv.ParseFloat(str, 64)
				if err != nil {
					return fmt.Errorf(errorTemplate, err)
				}
				props[valueKey] = f
			default:
				return fmt.Errorf(errorTemplate, fmt.Sprintf("%v is not supported", v))
			}
			delete(props, k)
		}
	}

	e.Properties = props
	return nil
}
// getAndDelete removes key from props and returns the removed value, or nil
// when the key is not present.
func getAndDelete(props map[string]interface{}, key string) interface{} {
	value, ok := props[key]
	if !ok {
		return nil
	}
	delete(props, key)
	return value
}
// addIfMatchHeader sets the If-Match header on h: "*" when force is true
// (unconditional operation), otherwise the supplied etag. Returns h.
func addIfMatchHeader(h map[string]string, force bool, etag string) map[string]string {
	match := etag
	if force {
		match = "*"
	}
	h[headerIfMatch] = match
	return h
}
// updates Etag and timestamp from the service response headers
func (e *Entity) updateEtagAndTimestamp(headers http.Header) error {
	e.OdataEtag = headers.Get(headerEtag)
	return e.updateTimestamp(headers)
}

// updateTimestamp parses the response Date header (RFC1123) into TimeStamp.
func (e *Entity) updateTimestamp(headers http.Header) error {
	str := headers.Get(headerDate)
	t, err := time.Parse(time.RFC1123, str)
	if err != nil {
		return fmt.Errorf("Update timestamp error: %v", err)
	}
	e.TimeStamp = t
	return nil
}
// insertOr issues an insert-or-replace (PUT) or insert-or-merge (MERGE)
// request for this entity and refreshes its etag/timestamp on success.
func (e *Entity) insertOr(verb string, options *EntityOptions) error {
	query, headers := options.getParameters()
	headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())

	body, err := json.Marshal(e)
	if err != nil {
		return err
	}
	headers = addBodyRelatedHeaders(headers, len(body))
	headers = addReturnContentHeaders(headers, EmptyPayload)

	uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
	resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	// the service answers 204 No Content on success
	if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
		return err
	}

	return e.updateEtagAndTimestamp(resp.Header)
}
// updateMerge sends the entity with the given verb (PUT for a full update,
// MERGE for a partial one). Unless force is true the request carries an
// If-Match with the entity's OData ETag, so a stale ETag yields an
// etag-mismatch error. On success the local etag/timestamp are refreshed.
func (e *Entity) updateMerge(force bool, verb string, options *EntityOptions) error {
	query, headers := options.getParameters()
	headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders())

	body, err := json.Marshal(e)
	if err != nil {
		return err
	}
	headers = addBodyRelatedHeaders(headers, len(body))
	headers = addIfMatchHeader(headers, force, e.OdataEtag)
	headers = addReturnContentHeaders(headers, EmptyPayload)

	uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query)
	resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth)
	if err != nil {
		// resp may be nil when exec fails before a response is received;
		// guard before dereferencing to avoid a nil-pointer panic.
		if resp != nil && resp.StatusCode == http.StatusPreconditionFailed {
			return fmt.Errorf(etagErrorTemplate, err)
		}
		return err
	}
	defer drainRespBody(resp)

	if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
		return err
	}

	return e.updateEtagAndTimestamp(resp.Header)
}
// stringFromMap removes key from props (via getAndDelete) and returns its
// value as a string. It returns "" when the key is absent or the value is
// not a string (previously a non-string value caused a runtime panic from
// an unchecked type assertion).
func stringFromMap(props map[string]interface{}, key string) string {
	if value, ok := getAndDelete(props, key).(string); ok {
		return value
	}
	return ""
}
// getParameters converts EntityOptions into URL query values and request
// headers; a nil receiver yields empty values for both.
func (options *EntityOptions) getParameters() (url.Values, map[string]string) {
	query := url.Values{}
	headers := map[string]string{}
	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = headersFromStruct(*options)
	}
	return query, headers
}

View File

@@ -1,484 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"sync"
)
const fourMB = uint64(4194304)      // 4 MiB, per-range limit (see modifyRange)
const oneTB = uint64(1099511627776) // 1 TiB, per-file limit (see Create)

// Export maximum range and file sizes

// MaxRangeSize defines the maximum size in bytes for a file range.
const MaxRangeSize = fourMB

// MaxFileSize defines the maximum size in bytes for a file.
const MaxFileSize = oneTB

// File represents a file on a share.
type File struct {
	fsc                *FileServiceClient // client used to issue all requests for this file
	Metadata           map[string]string  // user-defined metadata key/value pairs
	Name               string             `xml:"Name"`
	parent             *Directory // directory containing this file
	Properties         FileProperties `xml:"Properties"`
	share              *Share // share this file belongs to
	FileCopyProperties FileCopyState
	mutex              *sync.Mutex
}

// FileProperties contains various properties of a file.
type FileProperties struct {
	CacheControl string `header:"x-ms-cache-control"`
	Disposition  string `header:"x-ms-content-disposition"`
	Encoding     string `header:"x-ms-content-encoding"`
	Etag         string
	Language     string `header:"x-ms-content-language"`
	LastModified string
	Length       uint64 `xml:"Content-Length" header:"x-ms-content-length"`
	MD5          string `header:"x-ms-content-md5"`
	Type         string `header:"x-ms-content-type"`
}

// FileCopyState contains various properties of a file copy operation.
type FileCopyState struct {
	CompletionTime string
	ID             string `header:"x-ms-copy-id"`
	Progress       string
	Source         string
	Status         string `header:"x-ms-copy-status"`
	StatusDesc     string
}

// FileStream contains file data returned from a call to GetFile.
type FileStream struct {
	Body       io.ReadCloser // the caller is responsible for closing Body
	ContentMD5 string
}

// FileRequestOptions will be passed to misc file operations.
// Currently just Timeout (in seconds) but could expand.
type FileRequestOptions struct {
	Timeout uint // timeout duration in seconds.
}
// prepareOptions converts FileRequestOptions into URL query values;
// a nil options yields an empty set.
func prepareOptions(options *FileRequestOptions) url.Values {
	if options == nil {
		return url.Values{}
	}
	return addTimeout(url.Values{}, options.Timeout)
}
// FileRanges contains a list of file range information for a file.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
type FileRanges struct {
	ContentLength uint64 // total file size, from the x-ms-content-length header
	LastModified  string
	ETag          string
	FileRanges    []FileRange `xml:"Range"`
}
// FileRange contains range information for a file.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
type FileRange struct {
	Start uint64 `xml:"Start"`
	End   uint64 `xml:"End"`
}

// String renders the range as an HTTP Range header value, e.g. "bytes=0-511".
func (fr FileRange) String() string {
	return "bytes=" + strconv.FormatUint(fr.Start, 10) + "-" + strconv.FormatUint(fr.End, 10)
}
// builds the complete file path for this file object by appending the file
// name to its parent directory's path
func (f *File) buildPath() string {
	return f.parent.buildPath() + "/" + f.Name
}
// ClearRange releases the specified range of space in a file.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range
func (f *File) ClearRange(fileRange FileRange, options *FileRequestOptions) error {
	var timeout *uint
	if options != nil {
		timeout = &options.Timeout
	}
	// a nil body makes modifyRange issue a "clear" write
	headers, err := f.modifyRange(nil, fileRange, timeout, nil)
	if err == nil {
		f.updateEtagAndLastModified(headers)
	}
	return err
}
// Create creates a new file or replaces an existing one.
// maxSize is the maximum size of the file, in bytes, up to 1TB.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-File
func (f *File) Create(maxSize uint64, options *FileRequestOptions) error {
	if maxSize > oneTB {
		return fmt.Errorf("max file size is 1TB")
	}

	extraHeaders := headersFromStruct(f.Properties)
	extraHeaders["x-ms-content-length"] = strconv.FormatUint(maxSize, 10)
	extraHeaders["x-ms-type"] = "file"

	outputHeaders, err := f.fsc.createResource(f.buildPath(), resourceFile, prepareOptions(options), mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusCreated})
	if err != nil {
		return err
	}

	f.Properties.Length = maxSize
	f.updateEtagAndLastModified(outputHeaders)
	return nil
}
// CopyFile operation copied a file/blob from the sourceURL to the path provided.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/copy-file
func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error {
	extraHeaders := map[string]string{
		"x-ms-type":        "file",
		"x-ms-copy-source": sourceURL,
	}
	params := prepareOptions(options)

	// the service accepts the copy asynchronously with 202 Accepted
	headers, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted})
	if err != nil {
		return err
	}

	f.updateEtagAndLastModified(headers)
	// record the copy operation's id/status for later polling
	f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id")
	f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status")
	return nil
}
// Delete immediately removes this file from the storage account.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
func (f *File) Delete(options *FileRequestOptions) error {
	return f.fsc.deleteResource(f.buildPath(), resourceFile, options)
}
// DeleteIfExists removes this file if it exists. The boolean result reports
// whether a file was actually deleted.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2
func (f *File) DeleteIfExists(options *FileRequestOptions) (bool, error) {
	resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile, options)
	if resp == nil {
		return false, err
	}
	defer drainRespBody(resp)

	switch resp.StatusCode {
	case http.StatusAccepted:
		return true, nil
	case http.StatusNotFound:
		// already absent: not an error for this call
		return false, nil
	}
	return false, err
}
// GetFileOptions includes options for a get file operation
type GetFileOptions struct {
	Timeout       uint // server timeout in seconds
	GetContentMD5 bool // request an MD5 of the returned range (ranges <= 4MB only)
}

// DownloadToStream operation downloads the file.
// The caller is responsible for closing the returned ReadCloser.
//
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
func (f *File) DownloadToStream(options *FileRequestOptions) (io.ReadCloser, error) {
	params := prepareOptions(options)
	resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, nil)
	if err != nil {
		return nil, err
	}

	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
		// unexpected status: drain the body ourselves before bailing out
		drainRespBody(resp)
		return nil, err
	}
	return resp.Body, nil
}
// DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash.
// The caller is responsible for closing fs.Body.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
func (f *File) DownloadRangeToStream(fileRange FileRange, options *GetFileOptions) (fs FileStream, err error) {
	extraHeaders := map[string]string{
		"Range": fileRange.String(),
	}
	params := url.Values{}
	if options != nil {
		if options.GetContentMD5 {
			// the service only computes a range MD5 for ranges up to 4MB
			if isRangeTooBig(fileRange) {
				return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true")
			}
			extraHeaders["x-ms-range-get-content-md5"] = "true"
		}
		params = addTimeout(params, options.Timeout)
	}

	resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, extraHeaders)
	if err != nil {
		return fs, err
	}

	if err = checkRespCode(resp, []int{http.StatusOK, http.StatusPartialContent}); err != nil {
		// unexpected status: drain the body before returning the error
		drainRespBody(resp)
		return fs, err
	}

	fs.Body = resp.Body
	if options != nil && options.GetContentMD5 {
		fs.ContentMD5 = resp.Header.Get("Content-MD5")
	}
	return fs, nil
}
// Exists returns true if this file exists.
// On a positive answer the cached etag and properties are refreshed.
func (f *File) Exists() (bool, error) {
	exists, headers, err := f.fsc.resourceExists(f.buildPath(), resourceFile)
	if !exists {
		return false, err
	}
	f.updateEtagAndLastModified(headers)
	f.updateProperties(headers)
	return true, err
}
// FetchAttributes updates metadata and properties for this file from the
// service via a HEAD request.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-properties
func (f *File) FetchAttributes(options *FileRequestOptions) error {
	headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, prepareOptions(options), http.MethodHead)
	if err != nil {
		return err
	}

	f.updateEtagAndLastModified(headers)
	f.updateProperties(headers)
	f.Metadata = getMetadataFromHeaders(headers)
	return nil
}
// isRangeTooBig reports whether the range spans more than 4MB.
func isRangeTooBig(fileRange FileRange) bool {
	return fileRange.End-fileRange.Start > fourMB
}
// ListRangesOptions includes options for a list file ranges operation
type ListRangesOptions struct {
	Timeout   uint       // server timeout in seconds
	ListRange *FileRange // if non-nil, restrict the listing to this range
}

// ListRanges returns the list of valid ranges for this file.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges
func (f *File) ListRanges(options *ListRangesOptions) (*FileRanges, error) {
	params := url.Values{"comp": {"rangelist"}}

	// add optional range to list
	var headers map[string]string
	if options != nil {
		params = addTimeout(params, options.Timeout)
		if options.ListRange != nil {
			headers = make(map[string]string)
			headers["Range"] = options.ListRange.String()
		}
	}

	resp, err := f.fsc.listContent(f.buildPath(), params, headers)
	if err != nil {
		return nil, err
	}

	defer resp.Body.Close()
	var cl uint64
	cl, err = strconv.ParseUint(resp.Header.Get("x-ms-content-length"), 10, 64)
	if err != nil {
		// drain the remaining body before returning the parse error
		ioutil.ReadAll(resp.Body)
		return nil, err
	}

	var out FileRanges
	out.ContentLength = cl
	out.ETag = resp.Header.Get("ETag")
	out.LastModified = resp.Header.Get("Last-Modified")

	err = xmlUnmarshal(resp.Body, &out)
	return &out, err
}
// modifies a range of bytes in this file: a nil bytes reader clears the
// range, a non-nil reader updates it with the supplied data (at most 4MB).
// Returns the response headers so callers can refresh etag/last-modified.
func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, timeout *uint, contentMD5 *string) (http.Header, error) {
	if err := f.fsc.checkForStorageEmulator(); err != nil {
		return nil, err
	}
	if fileRange.End < fileRange.Start {
		return nil, errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
	}
	if bytes != nil && isRangeTooBig(fileRange) {
		return nil, errors.New("range cannot exceed 4MB in size")
	}

	params := url.Values{"comp": {"range"}}
	if timeout != nil {
		params = addTimeout(params, *timeout)
	}

	uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), params)

	// default to clear
	write := "clear"
	cl := uint64(0)

	// if bytes is not nil then this is an update operation
	if bytes != nil {
		write = "update"
		// range end is inclusive, hence the +1
		cl = (fileRange.End - fileRange.Start) + 1
	}

	extraHeaders := map[string]string{
		"Content-Length": strconv.FormatUint(cl, 10),
		"Range":          fileRange.String(),
		"x-ms-write":     write,
	}

	if contentMD5 != nil {
		extraHeaders["Content-MD5"] = *contentMD5
	}

	headers := mergeHeaders(f.fsc.client.getStandardHeaders(), extraHeaders)
	resp, err := f.fsc.client.exec(http.MethodPut, uri, headers, bytes, f.fsc.auth)
	if err != nil {
		return nil, err
	}
	defer drainRespBody(resp)
	return resp.Header, checkRespCode(resp, []int{http.StatusCreated})
}
// SetMetadata replaces the metadata for this file.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetFileMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Metadata
func (f *File) SetMetadata(options *FileRequestOptions) error {
	extra := mergeMDIntoExtraHeaders(f.Metadata, nil)
	respHeaders, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, extra, options)
	if err != nil {
		return err
	}
	// Keep the cached ETag/Last-Modified in sync with the service response.
	f.updateEtagAndLastModified(respHeaders)
	return nil
}
// SetProperties sets system properties on this file.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by SetFileProperties. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Properties
func (f *File) SetProperties(options *FileRequestOptions) error {
	extra := headersFromStruct(f.Properties)
	respHeaders, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, extra, options)
	if err != nil {
		return err
	}
	// Keep the cached ETag/Last-Modified in sync with the service response.
	f.updateEtagAndLastModified(respHeaders)
	return nil
}
// updateEtagAndLastModified caches the Etag and Last-Modified response
// headers on the file's properties.
func (f *File) updateEtagAndLastModified(headers http.Header) {
	f.Properties.Etag = headers.Get("Etag")
	f.Properties.LastModified = headers.Get("Last-Modified")
}

// updateProperties refreshes the file's cached properties from the
// specified HTTP response headers.
func (f *File) updateProperties(header http.Header) {
	// Only overwrite Length when the Content-Length header parses cleanly.
	size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64)
	if err == nil {
		f.Properties.Length = size
	}
	f.updateEtagAndLastModified(header)
	f.Properties.CacheControl = header.Get("Cache-Control")
	f.Properties.Disposition = header.Get("Content-Disposition")
	f.Properties.Encoding = header.Get("Content-Encoding")
	f.Properties.Language = header.Get("Content-Language")
	f.Properties.MD5 = header.Get("Content-MD5")
	f.Properties.Type = header.Get("Content-Type")
}
// URL gets the canonical URL to this file.
// This method does not create a publicly accessible URL if the file
// is private and this method does not check if the file exists.
func (f *File) URL() string {
	return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), nil)
}

// WriteRangeOptions includes options for a write file range operation.
type WriteRangeOptions struct {
	Timeout    uint   // request timeout; 0 means the service default
	ContentMD5 string // optional MD5 hash of the range content
}
// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content (inside
// options parameter). Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with
// a maximum size of 4MB.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range
func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, options *WriteRangeOptions) error {
	if bytes == nil {
		return errors.New("bytes cannot be nil")
	}
	var timeout *uint
	var md5 *string
	if options != nil {
		timeout = &options.Timeout
		md5 = &options.ContentMD5
	}
	headers, err := f.modifyRange(bytes, fileRange, timeout, md5)
	if err != nil {
		return err
	}
	// it's perfectly legal for multiple go routines to call WriteRange
	// on the same *File (e.g. concurrently writing non-overlapping ranges)
	// so we must take the file mutex before updating our properties.
	f.mutex.Lock()
	f.updateEtagAndLastModified(headers)
	f.mutex.Unlock()
	return nil
}

View File

@@ -1,338 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strconv"
)
// FileServiceClient contains operations for Microsoft Azure File Service.
type FileServiceClient struct {
	client Client         // underlying storage client used for every request
	auth   authentication // authentication scheme applied to requests
}

// ListSharesParameters defines the set of customizable parameters to make a
// List Shares call. Zero values are omitted from the request.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares
type ListSharesParameters struct {
	Prefix     string
	Marker     string
	Include    string
	MaxResults uint
	Timeout    uint
}

// ShareListResponse contains the response fields from
// ListShares call.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares
type ShareListResponse struct {
	XMLName    xml.Name `xml:"EnumerationResults"`
	Xmlns      string   `xml:"xmlns,attr"`
	Prefix     string   `xml:"Prefix"`
	Marker     string   `xml:"Marker"`
	NextMarker string   `xml:"NextMarker"`
	MaxResults int64    `xml:"MaxResults"`
	Shares     []Share  `xml:"Shares>Share"`
}
// compType is the value sent in the "comp" query parameter of a request.
type compType string

const (
	compNone       compType = ""
	compList       compType = "list"
	compMetadata   compType = "metadata"
	compProperties compType = "properties"
	compRangeList  compType = "rangelist"
)

// String returns the query-parameter representation of ct.
func (ct compType) String() string { return string(ct) }

// resourceType is the value sent in the "restype" query parameter of a request.
type resourceType string

const (
	resourceDirectory resourceType = "directory"
	resourceFile      resourceType = ""
	resourceShare     resourceType = "share"
)

// String returns the query-parameter representation of rt.
func (rt resourceType) String() string { return string(rt) }
// getParameters converts the non-zero fields of p into URL query values.
func (p ListSharesParameters) getParameters() url.Values {
	out := url.Values{}
	setNonEmpty := func(key, val string) {
		if val != "" {
			out.Set(key, val)
		}
	}
	setNonEmpty("prefix", p.Prefix)
	setNonEmpty("marker", p.Marker)
	setNonEmpty("include", p.Include)
	if p.MaxResults != 0 {
		out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
	}
	if p.Timeout != 0 {
		out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
	}
	return out
}
// getParameters converts the non-zero fields of p into URL query values,
// including the request timeout.
func (p ListDirsAndFilesParameters) getParameters() url.Values {
	query := url.Values{}
	if p.Prefix != "" {
		query.Set("prefix", p.Prefix)
	}
	if p.Marker != "" {
		query.Set("marker", p.Marker)
	}
	if p.MaxResults != 0 {
		query.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
	}
	return addTimeout(query, p.Timeout)
}
// getURLInitValues returns the base query values ("comp" and "restype")
// for the given component and resource kind; zero values (compNone,
// resourceFile) are omitted.
func getURLInitValues(comp compType, res resourceType) url.Values {
	values := make(url.Values)
	if comp != compNone {
		values.Set("comp", comp.String())
	}
	if res != resourceFile {
		values.Set("restype", res.String())
	}
	return values
}
// GetShareReference returns a Share object for the specified share name.
func (f *FileServiceClient) GetShareReference(name string) *Share {
	share := &Share{
		fsc:  f,
		Name: name,
	}
	// A quota of -1 marks the value as not yet fetched from the service.
	share.Properties = ShareProperties{Quota: -1}
	return share
}
// ListShares returns the list of shares in a storage account along with
// pagination token and other response details.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/list-shares
func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) {
	q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
	var out ShareListResponse
	resp, err := f.listContent("", q, nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	err = xmlUnmarshal(resp.Body, &out)
	// assign our client to the newly created Share objects
	// NOTE(review): f is a copy (value receiver), so every returned Share
	// points at the same local copy of the client — confirm this aliasing
	// is intended.
	for i := range out.Shares {
		out.Shares[i].fsc = &f
	}
	return &out, err
}
// GetServiceProperties gets the properties of your storage account's file service.
// File service does not support logging.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-service-properties
func (f *FileServiceClient) GetServiceProperties() (*ServiceProperties, error) {
	return f.client.getServiceProperties(fileServiceName, f.auth)
}

// SetServiceProperties sets the properties of your storage account's file service.
// File service does not support logging.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-file-service-properties
func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error {
	return f.client.setServiceProperties(props, fileServiceName, f.auth)
}
// listContent retrieves directory or share content via a GET request.
// On success (200 OK) the caller owns the returned response and must
// close its body; on any other status the body is drained here and an
// error is returned.
func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*http.Response, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}
	uri := f.client.getEndpoint(fileServiceName, path, params)
	extraHeaders = f.client.protectUserAgent(extraHeaders)
	headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
	resp, err := f.client.exec(http.MethodGet, uri, headers, nil, f.auth)
	if err != nil {
		return nil, err
	}
	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
		drainRespBody(resp)
		return nil, err
	}
	return resp, nil
}
// resourceExists issues a HEAD request for the resource: a 200 means it
// exists, a 404 means it does not; both are reported without error along
// with the response headers.
// NOTE(review): for any other status code this falls through and returns
// (false, nil, err) — where err may be nil if exec succeeded — so an
// unexpected status is indistinguishable from "does not exist"; confirm
// this is intended.
func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, http.Header, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return false, nil, err
	}
	uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res))
	headers := f.client.getStandardHeaders()
	resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth)
	if resp != nil {
		defer drainRespBody(resp)
		if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound {
			return resp.StatusCode == http.StatusOK, resp.Header, nil
		}
	}
	return false, nil, err
}
// createResource creates a resource of the specified type and validates
// the response status against expectedResponseCodes. The response body
// is drained and the response headers returned.
func (f FileServiceClient) createResource(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string, expectedResponseCodes []int) (http.Header, error) {
	resp, err := f.createResourceNoClose(path, res, urlParams, extraHeaders)
	if err != nil {
		return nil, err
	}
	codeErr := checkRespCode(resp, expectedResponseCodes)
	drainRespBody(resp)
	return resp.Header, codeErr
}
// createResourceNoClose creates a resource of the specified type via PUT.
// It does not close the response body; the caller is responsible for
// draining/closing it.
func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*http.Response, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}
	values := getURLInitValues(compNone, res)
	combinedParams := mergeParams(values, urlParams)
	uri := f.client.getEndpoint(fileServiceName, path, combinedParams)
	extraHeaders = f.client.protectUserAgent(extraHeaders)
	headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
	return f.client.exec(http.MethodPut, uri, headers, nil, f.auth)
}
// getResourceHeaders returns the HTTP response headers for the specified
// directory or share, requiring a 200 OK status. The response body is
// always drained.
func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, params url.Values, verb string) (http.Header, error) {
	resp, err := f.getResourceNoClose(path, comp, res, params, verb, nil)
	if err != nil {
		return nil, err
	}
	defer drainRespBody(resp)
	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return nil, err
	}
	return resp.Header, nil
}
// getResourceNoClose fetches the specified resource with the given verb.
// It does not close the response body; the caller is responsible for it.
func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, params url.Values, verb string, extraHeaders map[string]string) (*http.Response, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}
	params = mergeParams(params, getURLInitValues(comp, res))
	uri := f.client.getEndpoint(fileServiceName, path, params)
	headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
	return f.client.exec(verb, uri, headers, nil, f.auth)
}
// deleteResource deletes the resource at path and requires a
// 202 Accepted response. The response body is drained before return.
func (f FileServiceClient) deleteResource(path string, res resourceType, options *FileRequestOptions) error {
	resp, err := f.deleteResourceNoClose(path, res, options)
	if err != nil {
		return err
	}
	codeErr := checkRespCode(resp, []int{http.StatusAccepted})
	drainRespBody(resp)
	return codeErr
}
// deleteResourceNoClose issues the DELETE request for the resource and
// returns the raw response. It does not close the response body; the
// caller is responsible for it.
func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType, options *FileRequestOptions) (*http.Response, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}
	values := mergeParams(getURLInitValues(compNone, res), prepareOptions(options))
	uri := f.client.getEndpoint(fileServiceName, path, values)
	return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth)
}
// mergeMDIntoExtraHeaders copies each metadata entry into extraHeaders,
// prefixing keys with the user-defined-metadata header prefix, and
// returns the merged map. When both inputs are nil it returns nil.
// Note: a non-nil extraHeaders map is mutated in place.
func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string {
	if metadata == nil && extraHeaders == nil {
		return nil
	}
	merged := extraHeaders
	if merged == nil {
		merged = make(map[string]string, len(metadata))
	}
	for key, value := range metadata {
		merged[userDefinedMetadataHeaderPrefix+key] = value
	}
	return merged
}
// setResourceHeaders PUTs extra header data for the specified resource,
// requiring a 200 OK response. The response body is drained and the
// response headers returned (even alongside a status-code error).
func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string, options *FileRequestOptions) (http.Header, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}
	params := mergeParams(getURLInitValues(comp, res), prepareOptions(options))
	uri := f.client.getEndpoint(fileServiceName, path, params)
	extraHeaders = f.client.protectUserAgent(extraHeaders)
	headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)
	resp, err := f.client.exec(http.MethodPut, uri, headers, nil, f.auth)
	if err != nil {
		return nil, err
	}
	defer drainRespBody(resp)
	return resp.Header, checkRespCode(resp, []int{http.StatusOK})
}
// checkForStorageEmulator determines if the client is setup for use with
// Azure Storage Emulator, and returns a relevant error.
// NOTE(review): the error string is capitalized and "Error:"-prefixed,
// which violates Go error-string convention (staticcheck ST1005); left
// unchanged in case callers match on the message.
func (f FileServiceClient) checkForStorageEmulator() error {
	if f.client.accountName == StorageEmulatorAccountName {
		return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator")
	}
	return nil
}

View File

@@ -1,201 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"errors"
"net/http"
"net/url"
"strconv"
"time"
)
// lease constants: header names and x-ms-lease-action values used by the
// Lease Blob operation.
const (
	leaseHeaderPrefix = "x-ms-lease-"
	headerLeaseID     = "x-ms-lease-id"
	leaseAction       = "x-ms-lease-action"
	leaseBreakPeriod  = "x-ms-lease-break-period"
	leaseDuration     = "x-ms-lease-duration"
	leaseProposedID   = "x-ms-proposed-lease-id"
	leaseTime         = "x-ms-lease-time"

	// values for the x-ms-lease-action header
	acquireLease = "acquire"
	renewLease   = "renew"
	changeLease  = "change"
	releaseLease = "release"
	breakLease   = "break"
)
// leaseCommonPut is the common PUT code for the acquire/renew/change/
// release/break lease functions. It sends the prepared headers with
// comp=lease, verifies the expected status code, drains the body, and
// returns the response headers.
func (b *Blob) leaseCommonPut(headers map[string]string, expectedStatus int, options *LeaseOptions) (http.Header, error) {
	params := url.Values{"comp": {"lease"}}
	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return nil, err
	}
	defer drainRespBody(resp)
	if err := checkRespCode(resp, []int{expectedStatus}); err != nil {
		return nil, err
	}
	return resp.Header, nil
}
// LeaseOptions includes options for all operations regarding leasing blobs.
// The `header:`-tagged fields are forwarded as request headers verbatim.
type LeaseOptions struct {
	Timeout           uint
	Origin            string     `header:"Origin"`
	IfMatch           string     `header:"If-Match"`
	IfNoneMatch       string     `header:"If-None-Match"`
	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	RequestID         string     `header:"x-ms-client-request-id"`
}
// AcquireLease creates a lease for a blob
// returns leaseID acquired
// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum
// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
func (b *Blob) AcquireLease(leaseTimeInSeconds int, proposedLeaseID string, options *LeaseOptions) (returnedLeaseID string, err error) {
	headers := b.Container.bsc.client.getStandardHeaders()
	headers[leaseAction] = acquireLease

	// Clamp the requested duration to the service limits: -1 (infinite) is
	// passed through; anything above 60s — or any value against an API
	// version predating leases' finite durations — becomes 60s; anything
	// below 15s becomes 15s. Note the version check is a lexicographic
	// string compare, which works for the YYYY-MM-DD version format.
	if leaseTimeInSeconds == -1 {
		// Do nothing, but don't trigger the following clauses.
	} else if leaseTimeInSeconds > 60 || b.Container.bsc.client.apiVersion < "2012-02-12" {
		leaseTimeInSeconds = 60
	} else if leaseTimeInSeconds < 15 {
		leaseTimeInSeconds = 15
	}

	headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds)

	if proposedLeaseID != "" {
		headers[leaseProposedID] = proposedLeaseID
	}

	respHeaders, err := b.leaseCommonPut(headers, http.StatusCreated, options)
	if err != nil {
		return "", err
	}

	returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))

	if returnedLeaseID != "" {
		return returnedLeaseID, nil
	}

	return "", errors.New("LeaseID not returned")
}
// BreakLease breaks the lease for a blob
// Returns the timeout remaining in the lease in seconds
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
func (b *Blob) BreakLease(options *LeaseOptions) (breakTimeout int, err error) {
	headers := b.Container.bsc.client.getStandardHeaders()
	headers[leaseAction] = breakLease
	return b.breakLeaseCommon(headers, options)
}

// BreakLeaseWithBreakPeriod breaks the lease for a blob
// breakPeriodInSeconds is used to determine how long until new lease can be created.
// Returns the timeout remaining in the lease in seconds
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
func (b *Blob) BreakLeaseWithBreakPeriod(breakPeriodInSeconds int, options *LeaseOptions) (breakTimeout int, err error) {
	headers := b.Container.bsc.client.getStandardHeaders()
	headers[leaseAction] = breakLease
	// The break period controls how long until a new lease may be acquired.
	headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds)
	return b.breakLeaseCommon(headers, options)
}
// breakLeaseCommon is shared by BreakLease and BreakLeaseWithBreakPeriod:
// it performs the PUT and parses the remaining lease time (in seconds)
// from the x-ms-lease-time response header, when present.
func (b *Blob) breakLeaseCommon(headers map[string]string, options *LeaseOptions) (breakTimeout int, err error) {
	respHeaders, err := b.leaseCommonPut(headers, http.StatusAccepted, options)
	if err != nil {
		return 0, err
	}
	if s := respHeaders.Get(http.CanonicalHeaderKey(leaseTime)); s != "" {
		breakTimeout, err = strconv.Atoi(s)
		if err != nil {
			return 0, err
		}
	}
	return breakTimeout, nil
}
// ChangeLease changes a lease ID for a blob and returns the newly
// acquired lease ID.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
func (b *Blob) ChangeLease(currentLeaseID string, proposedLeaseID string, options *LeaseOptions) (newLeaseID string, err error) {
	headers := b.Container.bsc.client.getStandardHeaders()
	headers[leaseAction] = changeLease
	headers[headerLeaseID] = currentLeaseID
	headers[leaseProposedID] = proposedLeaseID

	respHeaders, err := b.leaseCommonPut(headers, http.StatusOK, options)
	if err != nil {
		return "", err
	}
	if id := respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)); id != "" {
		return id, nil
	}
	return "", errors.New("LeaseID not returned")
}
// ReleaseLease releases the lease for a blob.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob
func (b *Blob) ReleaseLease(currentLeaseID string, options *LeaseOptions) error {
	headers := b.Container.bsc.client.getStandardHeaders()
	headers[leaseAction] = releaseLease
	headers[headerLeaseID] = currentLeaseID
	// Only the status code matters on release; the response headers are
	// discarded. Returning err directly replaces the previous redundant
	// `if err != nil { return err }; return nil` (staticcheck S1021-style).
	_, err := b.leaseCommonPut(headers, http.StatusOK, options)
	return err
}
// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
func (b *Blob) RenewLease(currentLeaseID string, options *LeaseOptions) error {
	headers := b.Container.bsc.client.getStandardHeaders()
	headers[leaseAction] = renewLease
	headers[headerLeaseID] = currentLeaseID
	// Only the status code matters on renew; returning err directly
	// replaces the redundant `if err != nil { return err }; return nil`.
	_, err := b.leaseCommonPut(headers, http.StatusOK, options)
	return err
}

View File

@@ -1,171 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strconv"
"time"
)
// Message represents an Azure message.
type Message struct {
	Queue        *Queue      // the queue this message belongs to
	Text         string      `xml:"MessageText"`
	ID           string      `xml:"MessageId"`
	Insertion    TimeRFC1123 `xml:"InsertionTime"`
	Expiration   TimeRFC1123 `xml:"ExpirationTime"`
	PopReceipt   string      `xml:"PopReceipt"`
	NextVisible  TimeRFC1123 `xml:"TimeNextVisible"`
	DequeueCount int         `xml:"DequeueCount"`
}

// buildPath joins the queue's messages path and this message's ID.
func (m *Message) buildPath() string {
	return fmt.Sprintf("%s/%s", m.Queue.buildPathMessages(), m.ID)
}

// PutMessageOptions is the set of options can be specified for Put Message
// operation. A zero struct does not use any preferences for the request.
type PutMessageOptions struct {
	Timeout           uint
	VisibilityTimeout int
	MessageTTL        int
	RequestID         string `header:"x-ms-client-request-id"`
}
// Put operation adds a new message to the back of the message queue.
// On success the service's creation response is decoded back into m
// (ID, pop receipt, timestamps).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Message
func (m *Message) Put(options *PutMessageOptions) error {
	query := url.Values{}
	headers := m.Queue.qsc.client.getStandardHeaders()

	// The message text travels as an XML <QueueMessage> body.
	req := putMessageRequest{MessageText: m.Text}
	body, nn, err := xmlMarshal(req)
	if err != nil {
		return err
	}
	headers["Content-Length"] = strconv.Itoa(nn)

	if options != nil {
		if options.VisibilityTimeout != 0 {
			query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
		}
		if options.MessageTTL != 0 {
			query.Set("messagettl", strconv.Itoa(options.MessageTTL))
		}
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.Queue.buildPathMessages(), query)
	resp, err := m.Queue.qsc.client.exec(http.MethodPost, uri, headers, body, m.Queue.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	err = checkRespCode(resp, []int{http.StatusCreated})
	if err != nil {
		return err
	}
	// Decode the service's response (message metadata) back into m.
	err = xmlUnmarshal(resp.Body, m)
	if err != nil {
		return err
	}
	return nil
}
// UpdateMessageOptions is the set of options can be specified for Update Message
// operation. A zero struct does not use any preferences for the request.
type UpdateMessageOptions struct {
	Timeout           uint
	VisibilityTimeout int
	RequestID         string `header:"x-ms-client-request-id"`
}
// Update operation updates the specified message. On success it refreshes
// the message's pop receipt and next-visible time from the response
// headers.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Update-Message
func (m *Message) Update(options *UpdateMessageOptions) error {
	query := url.Values{}
	if m.PopReceipt != "" {
		query.Set("popreceipt", m.PopReceipt)
	}

	headers := m.Queue.qsc.client.getStandardHeaders()
	req := putMessageRequest{MessageText: m.Text}
	body, nn, err := xmlMarshal(req)
	if err != nil {
		return err
	}
	headers["Content-Length"] = strconv.Itoa(nn)
	// visibilitytimeout is required for Update (zero or greater) so set the default here
	query.Set("visibilitytimeout", "0")
	if options != nil {
		if options.VisibilityTimeout != 0 {
			query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
		}
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), query)
	resp, err := m.Queue.qsc.client.exec(http.MethodPut, uri, headers, body, m.Queue.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	m.PopReceipt = resp.Header.Get("x-ms-popreceipt")
	nextTimeStr := resp.Header.Get("x-ms-time-next-visible")
	if nextTimeStr != "" {
		nextTime, err := time.Parse(time.RFC1123, nextTimeStr)
		if err != nil {
			return err
		}
		m.NextVisible = TimeRFC1123(nextTime)
	}

	return checkRespCode(resp, []int{http.StatusNoContent})
}
// Delete operation deletes the specified message.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx
func (m *Message) Delete(options *QueueServiceOptions) error {
	client := m.Queue.qsc.client
	params := url.Values{"popreceipt": {m.PopReceipt}}
	headers := client.getStandardHeaders()
	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := client.getEndpoint(queueServiceName, m.buildPath(), params)
	resp, err := client.exec(http.MethodDelete, uri, headers, nil, m.Queue.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	// 204 No Content indicates the message was deleted.
	return checkRespCode(resp, []int{http.StatusNoContent})
}
// putMessageRequest is the XML body for Put Message and Update Message.
type putMessageRequest struct {
	XMLName     xml.Name `xml:"QueueMessage"`
	MessageText string   `xml:"MessageText"`
}

View File

@@ -1,48 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// MetadataLevel determines if operations should return a payload,
// and its level of detail.
type MetadataLevel string

// OData type names and query options supported by the service.
const (
	OdataTypeSuffix = "@odata.type"

	// Types
	OdataBinary   = "Edm.Binary"
	OdataDateTime = "Edm.DateTime"
	OdataDouble   = "Edm.Double"
	OdataGUID     = "Edm.Guid"
	OdataInt64    = "Edm.Int64"

	// Query options
	OdataFilter  = "$filter"
	OdataOrderBy = "$orderby"
	OdataTop     = "$top"
	OdataSkip    = "$skip"
	OdataCount   = "$count"
	OdataExpand  = "$expand"
	OdataSelect  = "$select"
	OdataSearch  = "$search"
)

// Accept-header values selecting the OData metadata level of a response.
const (
	EmptyPayload    MetadataLevel = ""
	NoMetadata      MetadataLevel = "application/json;odata=nometadata"
	MinimalMetadata MetadataLevel = "application/json;odata=minimalmetadata"
	FullMetadata    MetadataLevel = "application/json;odata=fullmetadata"
)

View File

@@ -1,203 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"time"
)
// GetPageRangesResponse contains the response fields from
// Get Page Ranges call.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
type GetPageRangesResponse struct {
	XMLName  xml.Name    `xml:"PageList"`
	PageList []PageRange `xml:"PageRange"`
}

// PageRange contains information about a page of a page blob from
// Get Pages Range call.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
type PageRange struct {
	Start int64 `xml:"Start"`
	End   int64 `xml:"End"`
}

// Sentinel errors for the Copy Blob polling helpers.
var (
	errBlobCopyAborted    = errors.New("storage: blob copy is aborted")
	errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch")
)

// PutPageOptions includes the options for a put page operation.
// The `header:`-tagged fields are forwarded as request headers verbatim.
type PutPageOptions struct {
	Timeout                           uint
	LeaseID                           string     `header:"x-ms-lease-id"`
	IfSequenceNumberLessThanOrEqualTo *int       `header:"x-ms-if-sequence-number-le"`
	IfSequenceNumberLessThan          *int       `header:"x-ms-if-sequence-number-lt"`
	IfSequenceNumberEqualTo           *int       `header:"x-ms-if-sequence-number-eq"`
	IfModifiedSince                   *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince                 *time.Time `header:"If-Unmodified-Since"`
	IfMatch                           string     `header:"If-Match"`
	IfNoneMatch                       string     `header:"If-None-Match"`
	RequestID                         string     `header:"x-ms-client-request-id"`
}
// WriteRange writes a range of pages to a page blob.
// Ranges must be aligned with 512-byte boundaries and chunk must be of size
// multiplies by 512.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
func (b *Blob) WriteRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
	if bytes == nil {
		return errors.New("bytes cannot be nil")
	}
	// A non-nil reader selects the "update" page-write operation.
	return b.modifyRange(blobRange, bytes, options)
}

// ClearRange clears the given range in a page blob.
// Ranges must be aligned with 512-byte boundaries and chunk must be of size
// multiplies by 512.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
func (b *Blob) ClearRange(blobRange BlobRange, options *PutPageOptions) error {
	// A nil reader selects the "clear" page-write operation.
	return b.modifyRange(blobRange, nil, options)
}
// modifyRange implements Put Page: a nil bytes reader clears the range,
// a non-nil reader updates it. Ranges must start on a 512-byte boundary
// and end one byte before one (End % 512 == 511).
func (b *Blob) modifyRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
	if blobRange.End < blobRange.Start {
		return errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
	}
	if blobRange.Start%512 != 0 {
		return errors.New("the value for rangeStart must be a multiple of 512")
	}
	if blobRange.End%512 != 511 {
		return errors.New("the value for rangeEnd must be a multiple of 512 - 1")
	}

	params := url.Values{"comp": {"page"}}

	// default to clear
	write := "clear"
	var cl uint64

	// if bytes is not nil then this is an update operation
	if bytes != nil {
		write = "update"
		cl = (blobRange.End - blobRange.Start) + 1
	}

	headers := b.Container.bsc.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypePage)
	headers["x-ms-page-write"] = write
	headers["x-ms-range"] = blobRange.String()
	headers["Content-Length"] = fmt.Sprintf("%v", cl)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}

	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	// Put Page responds 201 Created on success.
	return checkRespCode(resp, []int{http.StatusCreated})
}
// GetPageRangesOptions includes the options for a get page ranges operation.
type GetPageRangesOptions struct {
	Timeout          uint
	Snapshot         *time.Time // restrict the listing to a specific snapshot
	PreviousSnapshot *time.Time // list only ranges changed since this snapshot
	Range            *BlobRange // restrict the listing to this byte range
	LeaseID          string     `header:"x-ms-lease-id"`
	RequestID        string     `header:"x-ms-client-request-id"`
}
// GetPageRanges returns the list of valid page ranges for a page blob.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Page-Ranges
func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesResponse, error) {
	params := url.Values{"comp": {"pagelist"}}
	headers := b.Container.bsc.client.getStandardHeaders()

	if options != nil {
		params = addTimeout(params, options.Timeout)
		params = addSnapshot(params, options.Snapshot)
		if options.PreviousSnapshot != nil {
			params.Add("prevsnapshot", timeRFC3339Formatted(*options.PreviousSnapshot))
		}
		if options.Range != nil {
			headers["Range"] = options.Range.String()
		}
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	var out GetPageRangesResponse
	resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return out, err
	}
	defer drainRespBody(resp)

	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return out, err
	}
	// Decode the XML <PageList> body into the response struct.
	err = xmlUnmarshal(resp.Body, &out)
	return out, err
}
// PutPageBlob initializes an empty page blob with specified name and maximum
// size in bytes (size must be aligned to a 512-byte boundary). A page blob must
// be created using this method before writing pages.
//
// See CreateBlockBlobFromReader for more info on creating blobs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) PutPageBlob(options *PutBlobOptions) error {
	// Reject unaligned sizes up front; the service would fail the request.
	// Error string is lowercase per Go convention, matching the sibling
	// validation errors in this file.
	if b.Properties.ContentLength%512 != 0 {
		return errors.New("content length must be aligned to a 512-byte boundary")
	}
	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypePage)
	headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", b.Properties.ContentLength)
	headers["x-ms-blob-sequence-number"] = fmt.Sprintf("%v", b.Properties.SequenceNumber)
	headers = mergeHeaders(headers, headersFromStruct(b.Properties))
	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	return b.respondCreation(resp, BlobTypePage)
}

View File

@@ -1,436 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"time"
)
const (
	// casing is per Golang's http.Header canonicalizing the header names.
	approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count"
)

// QueueAccessPolicy represents each access policy in the queue ACL.
type QueueAccessPolicy struct {
	ID         string
	StartTime  time.Time
	ExpiryTime time.Time
	// Permission flags; these map onto the service's "raup" permission string.
	CanRead    bool
	CanAdd     bool
	CanUpdate  bool
	CanProcess bool
}

// QueuePermissions represents the queue ACLs.
type QueuePermissions struct {
	AccessPolicies []QueueAccessPolicy
}

// SetQueuePermissionOptions includes options for a set queue permissions operation
type SetQueuePermissionOptions struct {
	// Timeout is the server-side timeout in seconds; 0 means not sent.
	Timeout   uint
	RequestID string `header:"x-ms-client-request-id"`
}
// Queue represents an Azure queue.
type Queue struct {
	qsc      *QueueServiceClient
	Name     string
	Metadata map[string]string
	// AproxMessageCount is the approximate number of messages in the queue,
	// as reported by the service during GetMetadata. (The "Aprox" spelling
	// is kept for backward compatibility of the public field name.)
	AproxMessageCount uint64
}
// buildPath returns the URL path for this queue.
func (q *Queue) buildPath() string {
	return "/" + q.Name
}

// buildPathMessages returns the URL path for this queue's messages resource.
func (q *Queue) buildPathMessages() string {
	return q.buildPath() + "/messages"
}
// QueueServiceOptions includes options for some queue service operations
type QueueServiceOptions struct {
	// Timeout is the server-side timeout in seconds; 0 means not sent.
	Timeout   uint
	RequestID string `header:"x-ms-client-request-id"`
}
// Create operation creates a queue under the given account.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Queue4
func (q *Queue) Create(options *QueueServiceOptions) error {
	query := url.Values{}
	hdrs := q.qsc.client.getStandardHeaders()
	hdrs = q.qsc.client.addMetadataToHeaders(hdrs, q.Metadata)
	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}

	endpoint := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), query)
	resp, err := q.qsc.client.exec(http.MethodPut, endpoint, hdrs, nil, q.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusCreated})
}
// Delete operation permanently deletes the specified queue.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Queue3
func (q *Queue) Delete(options *QueueServiceOptions) error {
	query := url.Values{}
	hdrs := q.qsc.client.getStandardHeaders()
	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}

	endpoint := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), query)
	resp, err := q.qsc.client.exec(http.MethodDelete, endpoint, hdrs, nil, q.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusNoContent})
}
// Exists returns true if a queue with given name exists.
func (q *Queue) Exists() (bool, error) {
	endpoint := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}})
	resp, err := q.qsc.client.exec(http.MethodGet, endpoint, q.qsc.client.getStandardHeaders(), nil, q.qsc.auth)
	if resp == nil {
		// transport-level failure: no response to interpret
		return false, err
	}
	defer drainRespBody(resp)
	switch resp.StatusCode {
	case http.StatusOK:
		return true, nil
	case http.StatusNotFound:
		return false, nil
	}
	// any other status is converted into a service error
	return false, getErrorFromResponse(resp)
}
// SetMetadata operation sets user-defined metadata on the specified queue.
// Metadata is associated with the queue as name-value pairs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata
func (q *Queue) SetMetadata(options *QueueServiceOptions) error {
	query := url.Values{"comp": {"metadata"}}
	hdrs := q.qsc.client.getStandardHeaders()
	hdrs = q.qsc.client.addMetadataToHeaders(hdrs, q.Metadata)
	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}

	endpoint := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), query)
	resp, err := q.qsc.client.exec(http.MethodPut, endpoint, hdrs, nil, q.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusNoContent})
}
// GetMetadata operation retrieves user-defined metadata and queue
// properties on the specified queue. Metadata is associated with
// the queue as name-values pairs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata
//
// Because the way Golang's http client (and http.Header in particular)
// canonicalize header names, the returned metadata names would always
// be all lower case.
func (q *Queue) GetMetadata(options *QueueServiceOptions) error {
	query := url.Values{"comp": {"metadata"}}
	hdrs := q.qsc.client.getStandardHeaders()
	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}

	endpoint := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), query)
	resp, err := q.qsc.client.exec(http.MethodGet, endpoint, hdrs, nil, q.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return err
	}

	// The approximate message count header is optional in the response.
	if countStr := resp.Header.Get(http.CanonicalHeaderKey(approximateMessagesCountHeader)); countStr != "" {
		count, err := strconv.ParseUint(countStr, 10, 64)
		if err != nil {
			return err
		}
		q.AproxMessageCount = count
	}

	q.Metadata = getMetadataFromHeaders(resp.Header)
	return nil
}
// GetMessageReference returns a message object with the specified text.
func (q *Queue) GetMessageReference(text string) *Message {
	m := Message{Queue: q, Text: text}
	return &m
}
// GetMessagesOptions is the set of options can be specified for Get
// Messsages operation. A zero struct does not use any preferences for the
// request.
type GetMessagesOptions struct {
	// Timeout is the server-side timeout in seconds; 0 means not sent.
	Timeout uint
	// NumOfMessages is how many messages to dequeue at once; 0 omits the
	// parameter so the service default applies.
	NumOfMessages int
	// VisibilityTimeout hides the retrieved messages for this many seconds;
	// 0 omits the parameter so the service default applies.
	VisibilityTimeout int
	RequestID         string `header:"x-ms-client-request-id"`
}

// messages is the XML envelope returned by Get Messages / Peek Messages.
type messages struct {
	XMLName  xml.Name  `xml:"QueueMessagesList"`
	Messages []Message `xml:"QueueMessage"`
}
// GetMessages operation retrieves one or more messages from the front of the
// queue.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Messages
func (q *Queue) GetMessages(options *GetMessagesOptions) ([]Message, error) {
	params := url.Values{}
	hdrs := q.qsc.client.getStandardHeaders()
	if options != nil {
		if n := options.NumOfMessages; n != 0 {
			params.Set("numofmessages", strconv.Itoa(n))
		}
		if v := options.VisibilityTimeout; v != 0 {
			params.Set("visibilitytimeout", strconv.Itoa(v))
		}
		params = addTimeout(params, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}

	endpoint := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), params)
	resp, err := q.qsc.client.exec(http.MethodGet, endpoint, hdrs, nil, q.qsc.auth)
	if err != nil {
		return []Message{}, err
	}
	defer resp.Body.Close()

	var list messages
	if err := xmlUnmarshal(resp.Body, &list); err != nil {
		return []Message{}, err
	}
	// attach the queue so each message can be updated/deleted later
	for i := range list.Messages {
		list.Messages[i].Queue = q
	}
	return list.Messages, nil
}
// PeekMessagesOptions is the set of options can be specified for Peek
// Messsage operation. A zero struct does not use any preferences for the
// request.
type PeekMessagesOptions struct {
	// Timeout is the server-side timeout in seconds; 0 means not sent.
	Timeout uint
	// NumOfMessages is how many messages to peek; 0 omits the parameter so
	// the service default applies.
	NumOfMessages int
	RequestID     string `header:"x-ms-client-request-id"`
}
// PeekMessages retrieves one or more messages from the front of the queue, but
// does not alter the visibility of the message.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Peek-Messages
func (q *Queue) PeekMessages(options *PeekMessagesOptions) ([]Message, error) {
	// peekonly is required so the service does not dequeue the messages
	params := url.Values{"peekonly": {"true"}}
	hdrs := q.qsc.client.getStandardHeaders()
	if options != nil {
		if n := options.NumOfMessages; n != 0 {
			params.Set("numofmessages", strconv.Itoa(n))
		}
		params = addTimeout(params, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}

	endpoint := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), params)
	resp, err := q.qsc.client.exec(http.MethodGet, endpoint, hdrs, nil, q.qsc.auth)
	if err != nil {
		return []Message{}, err
	}
	defer resp.Body.Close()

	var list messages
	if err := xmlUnmarshal(resp.Body, &list); err != nil {
		return []Message{}, err
	}
	// attach the queue so each message can be updated/deleted later
	for i := range list.Messages {
		list.Messages[i].Queue = q
	}
	return list.Messages, nil
}
// ClearMessages operation deletes all messages from the specified queue.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Clear-Messages
func (q *Queue) ClearMessages(options *QueueServiceOptions) error {
	query := url.Values{}
	hdrs := q.qsc.client.getStandardHeaders()
	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}

	endpoint := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)
	resp, err := q.qsc.client.exec(http.MethodDelete, endpoint, hdrs, nil, q.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusNoContent})
}
// SetPermissions sets up queue permissions
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-acl
func (q *Queue) SetPermissions(permissions QueuePermissions, options *SetQueuePermissionOptions) error {
	body, length, err := generateQueueACLpayload(permissions.AccessPolicies)
	if err != nil {
		return err
	}

	query := url.Values{"comp": {"acl"}}
	hdrs := q.qsc.client.getStandardHeaders()
	hdrs["Content-Length"] = strconv.Itoa(length)
	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}

	endpoint := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), query)
	resp, err := q.qsc.client.exec(http.MethodPut, endpoint, hdrs, body, q.qsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)
	return checkRespCode(resp, []int{http.StatusNoContent})
}
// generateQueueACLpayload serializes the given access policies into the XML
// body expected by the Set Queue ACL operation, returning the reader and its
// length in bytes.
func generateQueueACLpayload(policies []QueueAccessPolicy) (io.Reader, int, error) {
	sil := SignedIdentifiers{SignedIdentifiers: make([]SignedIdentifier, 0, len(policies))}
	for i := range policies {
		p := &policies[i]
		sil.SignedIdentifiers = append(sil.SignedIdentifiers,
			convertAccessPolicyToXMLStructs(p.ID, p.StartTime, p.ExpiryTime, p.generateQueuePermissions()))
	}
	return xmlMarshal(sil)
}
// generateQueuePermissions builds the raw permission string in the service's
// canonical "raup" order; the public API keeps bool flags for usability.
func (qapd *QueueAccessPolicy) generateQueuePermissions() (permissions string) {
	flags := []struct {
		enabled bool
		letter  string
	}{
		{qapd.CanRead, "r"},
		{qapd.CanAdd, "a"},
		{qapd.CanUpdate, "u"},
		{qapd.CanProcess, "p"},
	}
	for _, f := range flags {
		if f.enabled {
			permissions += f.letter
		}
	}
	return permissions
}
// GetQueuePermissionOptions includes options for a get queue permissions operation
type GetQueuePermissionOptions struct {
	// Timeout is the server-side timeout in seconds; 0 means not sent.
	Timeout   uint
	RequestID string `header:"x-ms-client-request-id"`
}
// GetPermissions gets the queue permissions as per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-acl
// If timeout is 0 then it will not be passed to Azure
func (q *Queue) GetPermissions(options *GetQueuePermissionOptions) (*QueuePermissions, error) {
	query := url.Values{"comp": {"acl"}}
	hdrs := q.qsc.client.getStandardHeaders()
	if options != nil {
		query = addTimeout(query, options.Timeout)
		hdrs = mergeHeaders(hdrs, headersFromStruct(*options))
	}

	endpoint := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), query)
	resp, err := q.qsc.client.exec(http.MethodGet, endpoint, hdrs, nil, q.qsc.auth)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var ap AccessPolicy
	if err := xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList); err != nil {
		return nil, err
	}
	return buildQueueAccessPolicy(ap, &resp.Header), nil
}
// buildQueueAccessPolicy converts the XML ACL representation into the
// user-facing QueuePermissions structure with boolean permission flags.
func buildQueueAccessPolicy(ap AccessPolicy, headers *http.Header) *QueuePermissions {
	out := QueuePermissions{AccessPolicies: []QueueAccessPolicy{}}
	for _, sid := range ap.SignedIdentifiersList.SignedIdentifiers {
		perm := sid.AccessPolicy.Permission
		out.AccessPolicies = append(out.AccessPolicies, QueueAccessPolicy{
			ID:         sid.ID,
			StartTime:  sid.AccessPolicy.StartTime,
			ExpiryTime: sid.AccessPolicy.ExpiryTime,
			CanRead:    updatePermissions(perm, "r"),
			CanAdd:     updatePermissions(perm, "a"),
			CanUpdate:  updatePermissions(perm, "u"),
			CanProcess: updatePermissions(perm, "p"),
		})
	}
	return &out
}

View File

@@ -1,146 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"errors"
"fmt"
"net/url"
"strings"
"time"
)
// QueueSASOptions are options to construct a blob SAS
// URI.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
type QueueSASOptions struct {
	QueueSASPermissions
	SASOptions
}

// QueueSASPermissions includes the available permissions for
// a queue SAS URI.
type QueueSASPermissions struct {
	Read    bool
	Add     bool
	Update  bool
	Process bool
}
// buildString encodes the permission flags into the service's canonical
// "raup" permission string.
func (q QueueSASPermissions) buildString() string {
	var b strings.Builder
	if q.Read {
		b.WriteByte('r')
	}
	if q.Add {
		b.WriteByte('a')
	}
	if q.Update {
		b.WriteByte('u')
	}
	if q.Process {
		b.WriteByte('p')
	}
	return b.String()
}
// GetSASURI creates an URL to the specified queue which contains the Shared
// Access Signature with specified permissions and expiration time.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
func (q *Queue) GetSASURI(options QueueSASOptions) (string, error) {
	canonicalizedResource, err := q.qsc.client.buildCanonicalizedResource(q.buildPath(), q.qsc.auth, true)
	if err != nil {
		return "", err
	}

	// "The canonicalizedresouce portion of the string is a canonical path to the signed resource.
	// It must include the service name (blob, table, queue or file) for version 2015-02-21 or
	// later, the storage account name, and the resource name, and must be URL-decoded.
	// -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
	// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
	canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
	canonicalizedResource, err = url.QueryUnescape(canonicalizedResource)
	if err != nil {
		return "", err
	}

	// A zero Start time means "not specified" and is omitted from the signature.
	signedStart := ""
	if options.Start != (time.Time{}) {
		signedStart = options.Start.UTC().Format(time.RFC3339)
	}
	signedExpiry := options.Expiry.UTC().Format(time.RFC3339)

	protocols := "https,http"
	if options.UseHTTPS {
		protocols = "https"
	}

	permissions := options.QueueSASPermissions.buildString()
	stringToSign, err := queueSASStringToSign(q.qsc.client.apiVersion, canonicalizedResource, signedStart, signedExpiry, options.IP, permissions, protocols, options.Identifier)
	if err != nil {
		return "", err
	}

	sig := q.qsc.client.computeHmac256(stringToSign)
	sasParams := url.Values{
		"sv":  {q.qsc.client.apiVersion},
		"se":  {signedExpiry},
		"sp":  {permissions},
		"sig": {sig},
	}

	// The spr (protocol) and sip (IP range) query parameters only exist
	// from service version 2015-04-05 onward.
	if q.qsc.client.apiVersion >= "2015-04-05" {
		sasParams.Add("spr", protocols)
		addQueryParameter(sasParams, "sip", options.IP)
	}

	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), nil)
	sasURL, err := url.Parse(uri)
	if err != nil {
		return "", err
	}
	sasURL.RawQuery = sasParams.Encode()
	return sasURL.String(), nil
}
func queueSASStringToSign(signedVersion, canonicalizedResource, signedStart, signedExpiry, signedIP, signedPermissions, protocols, signedIdentifier string) (string, error) {
if signedVersion >= "2015-02-21" {
canonicalizedResource = "/queue" + canonicalizedResource
}
// https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
if signedVersion >= "2015-04-05" {
return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s",
signedPermissions,
signedStart,
signedExpiry,
canonicalizedResource,
signedIdentifier,
signedIP,
protocols,
signedVersion), nil
}
// reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
if signedVersion >= "2013-08-15" {
return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion), nil
}
return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
}

View File

@@ -1,42 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// QueueServiceClient contains operations for Microsoft Azure Queue Storage
// Service.
type QueueServiceClient struct {
	// client is the shared storage account client used for signing and
	// issuing requests.
	client Client
	auth   authentication
}
// GetServiceProperties gets the properties of your storage account's queue service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties
func (q *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) {
	// delegate to the shared client, scoped to the queue service endpoint
	return q.client.getServiceProperties(queueServiceName, q.auth)
}

// SetServiceProperties sets the properties of your storage account's queue service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties
func (q *QueueServiceClient) SetServiceProperties(props ServiceProperties) error {
	// delegate to the shared client, scoped to the queue service endpoint
	return q.client.setServiceProperties(props, queueServiceName, q.auth)
}
// GetQueueReference returns a Container object for the specified queue name.
func (q *QueueServiceClient) GetQueueReference(name string) *Queue {
	queue := Queue{qsc: q, Name: name}
	return &queue
}

View File

@@ -1,216 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"fmt"
"net/http"
"net/url"
"strconv"
)
// Share represents an Azure file share.
type Share struct {
	fsc        *FileServiceClient
	Name       string          `xml:"Name"`
	Properties ShareProperties `xml:"Properties"`
	Metadata   map[string]string
}

// ShareProperties contains various properties of a share.
type ShareProperties struct {
	LastModified string `xml:"Last-Modified"`
	Etag         string `xml:"Etag"`
	// Quota is the share quota in GiB; 0 means unset.
	Quota int `xml:"Quota"`
}
// buildPath builds the complete URL path for this share object.
func (s *Share) buildPath() string {
	return "/" + s.Name
}
// Create this share under the associated account.
// If a share with the same name already exists, the operation fails.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share
func (s *Share) Create(options *FileRequestOptions) error {
	extra := map[string]string{}
	if quota := s.Properties.Quota; quota > 0 {
		extra["x-ms-share-quota"] = strconv.Itoa(quota)
	}

	params := prepareOptions(options)
	respHeaders, err := s.fsc.createResource(s.buildPath(), resourceShare, params, mergeMDIntoExtraHeaders(s.Metadata, extra), []int{http.StatusCreated})
	if err != nil {
		return err
	}
	s.updateEtagAndLastModified(respHeaders)
	return nil
}
// CreateIfNotExists creates this share under the associated account if
// it does not exist. Returns true if the share is newly created or false if
// the share already exists.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share
func (s *Share) CreateIfNotExists(options *FileRequestOptions) (bool, error) {
	extra := map[string]string{}
	if quota := s.Properties.Quota; quota > 0 {
		extra["x-ms-share-quota"] = strconv.Itoa(quota)
	}

	params := prepareOptions(options)
	resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, params, extra)
	if resp == nil {
		return false, err
	}
	defer drainRespBody(resp)
	switch resp.StatusCode {
	case http.StatusCreated:
		s.updateEtagAndLastModified(resp.Header)
		return true, nil
	case http.StatusConflict:
		// the share already exists; refresh its cached attributes
		return false, s.FetchAttributes(nil)
	}
	return false, err
}
// Delete marks this share for deletion. The share along with any files
// and directories contained within it are later deleted during garbage
// collection. If the share does not exist the operation fails
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share
func (s *Share) Delete(options *FileRequestOptions) error {
	// delegate to the shared file-service helper for resource deletion
	return s.fsc.deleteResource(s.buildPath(), resourceShare, options)
}
// DeleteIfExists operation marks this share for deletion if it exists.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share
func (s *Share) DeleteIfExists(options *FileRequestOptions) (bool, error) {
	resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare, options)
	if resp == nil {
		return false, err
	}
	defer drainRespBody(resp)
	switch resp.StatusCode {
	case http.StatusAccepted:
		return true, nil
	case http.StatusNotFound:
		// deleting a missing share is not an error for this variant
		return false, nil
	}
	return false, err
}
// Exists returns true if this share already exists
// on the storage account, otherwise returns false.
func (s *Share) Exists() (bool, error) {
	exists, headers, err := s.fsc.resourceExists(s.buildPath(), resourceShare)
	if exists {
		// refresh the cached Etag/Last-Modified and quota from the probe response
		s.updateEtagAndLastModified(headers)
		s.updateQuota(headers)
	}
	return exists, err
}
// FetchAttributes retrieves metadata and properties for this share.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-share-properties
func (s *Share) FetchAttributes(options *FileRequestOptions) error {
	respHeaders, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, prepareOptions(options), http.MethodHead)
	if err != nil {
		return err
	}

	s.updateEtagAndLastModified(respHeaders)
	s.updateQuota(respHeaders)
	s.Metadata = getMetadataFromHeaders(respHeaders)
	return nil
}
// GetRootDirectoryReference returns a Directory object at the root of this share.
func (s *Share) GetRootDirectoryReference() *Directory {
	// an empty directory path denotes the root of the share
	return &Directory{
		fsc:   s.fsc,
		share: s,
	}
}

// ServiceClient returns the FileServiceClient associated with this share.
func (s *Share) ServiceClient() *FileServiceClient {
	return s.fsc
}
// SetMetadata replaces the metadata for this share.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetShareMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-share-metadata
func (s *Share) SetMetadata(options *FileRequestOptions) error {
	respHeaders, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil), options)
	if err != nil {
		return err
	}
	s.updateEtagAndLastModified(respHeaders)
	return nil
}
// SetProperties sets system properties for this share.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by SetShareProperties. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Share-Properties
func (s *Share) SetProperties(options *FileRequestOptions) error {
	extra := map[string]string{}
	if quota := s.Properties.Quota; quota > 0 {
		// the service caps share quotas at 5120 GiB
		if quota > 5120 {
			return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", quota)
		}
		extra["x-ms-share-quota"] = strconv.Itoa(quota)
	}

	respHeaders, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, extra, options)
	if err != nil {
		return err
	}
	s.updateEtagAndLastModified(respHeaders)
	return nil
}
// updateEtagAndLastModified refreshes the cached Etag and Last-Modified
// properties from the given response headers.
func (s *Share) updateEtagAndLastModified(headers http.Header) {
	s.Properties.LastModified = headers.Get("Last-Modified")
	s.Properties.Etag = headers.Get("Etag")
}

// updateQuota refreshes the cached quota from the x-ms-share-quota response
// header, leaving it untouched when the header is absent or non-numeric.
func (s *Share) updateQuota(headers http.Header) {
	if quota, err := strconv.Atoi(headers.Get("x-ms-share-quota")); err == nil {
		s.Properties.Quota = quota
	}
}
// URL gets the canonical URL to this share. This method does not create a publicly accessible
// URL if the share is private and this method does not check if the share exists.
func (s *Share) URL() string {
	// empty query values: the canonical URL carries no parameters
	return s.fsc.client.getEndpoint(fileServiceName, s.buildPath(), url.Values{})
}

View File

@@ -1,61 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"strings"
"time"
)
// AccessPolicyDetailsXML has specifics about an access policy
// annotated with XML details.
type AccessPolicyDetailsXML struct {
	StartTime  time.Time `xml:"Start"`
	ExpiryTime time.Time `xml:"Expiry"`
	// Permission is the raw permission string (e.g. "raup").
	Permission string `xml:"Permission"`
}

// SignedIdentifier is a wrapper for a specific policy
type SignedIdentifier struct {
	ID           string                 `xml:"Id"`
	AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"`
}

// SignedIdentifiers part of the response from GetPermissions call.
type SignedIdentifiers struct {
	SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"`
}

// AccessPolicy is the response type from the GetPermissions call.
type AccessPolicy struct {
	SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"`
}
// convertAccessPolicyToXMLStructs converts between AccessPolicyDetails which is a struct better for API usage to the
// AccessPolicy struct which will get converted to XML.
func convertAccessPolicyToXMLStructs(id string, startTime time.Time, expiryTime time.Time, permissions string) SignedIdentifier {
	// times are normalized to UTC and second precision for the wire format
	details := AccessPolicyDetailsXML{
		StartTime:  startTime.UTC().Round(time.Second),
		ExpiryTime: expiryTime.UTC().Round(time.Second),
		Permission: permissions,
	}
	return SignedIdentifier{ID: id, AccessPolicy: details}
}
// updatePermissions reports whether the raw permission string contains the
// given permission letter.
func updatePermissions(raw, flag string) bool {
	return strings.Contains(raw, flag)
}

View File

@@ -1,150 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"net/http"
"net/url"
"strconv"
)
// ServiceProperties represents the storage account service properties
type ServiceProperties struct {
	Logging       *Logging
	HourMetrics   *Metrics
	MinuteMetrics *Metrics
	Cors          *Cors
	DeleteRetentionPolicy *RetentionPolicy // blob storage only
	StaticWebsite         *StaticWebsite   // blob storage only
}

// Logging represents the Azure Analytics Logging settings
type Logging struct {
	Version string
	// which request classes are logged
	Delete          bool
	Read            bool
	Write           bool
	RetentionPolicy *RetentionPolicy
}

// RetentionPolicy indicates if retention is enabled and for how many days
type RetentionPolicy struct {
	Enabled bool
	// Days is only meaningful when Enabled is true.
	Days *int
}

// Metrics provide request statistics.
type Metrics struct {
	Version         string
	Enabled         bool
	IncludeAPIs     *bool
	RetentionPolicy *RetentionPolicy
}

// Cors includes all the CORS rules
type Cors struct {
	CorsRule []CorsRule
}

// CorsRule includes all settings for a Cors rule
type CorsRule struct {
	AllowedOrigins  string
	AllowedMethods  string
	MaxAgeInSeconds int
	ExposedHeaders  string
	AllowedHeaders  string
}

// StaticWebsite - The properties that enable an account to host a static website
type StaticWebsite struct {
	// Enabled - Indicates whether this account is hosting a static website
	Enabled bool
	// IndexDocument - The default name of the index page under each directory
	IndexDocument *string
	// ErrorDocument404Path - The absolute path of the custom 404 page
	ErrorDocument404Path *string
}
func (c Client) getServiceProperties(service string, auth authentication) (*ServiceProperties, error) {
query := url.Values{
"restype": {"service"},
"comp": {"properties"},
}
uri := c.getEndpoint(service, "", query)
headers := c.getStandardHeaders()
resp, err := c.exec(http.MethodGet, uri, headers, nil, auth)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
return nil, err
}
var out ServiceProperties
err = xmlUnmarshal(resp.Body, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func (c Client) setServiceProperties(props ServiceProperties, service string, auth authentication) error {
query := url.Values{
"restype": {"service"},
"comp": {"properties"},
}
uri := c.getEndpoint(service, "", query)
// Ideally, StorageServiceProperties would be the output struct
// This is to avoid golint stuttering, while generating the correct XML
type StorageServiceProperties struct {
Logging *Logging
HourMetrics *Metrics
MinuteMetrics *Metrics
Cors *Cors
DeleteRetentionPolicy *RetentionPolicy
StaticWebsite *StaticWebsite
}
input := StorageServiceProperties{
Logging: props.Logging,
HourMetrics: props.HourMetrics,
MinuteMetrics: props.MinuteMetrics,
Cors: props.Cors,
}
// only set these fields for blob storage else it's invalid XML
if service == blobServiceName {
input.DeleteRetentionPolicy = props.DeleteRetentionPolicy
input.StaticWebsite = props.StaticWebsite
}
body, length, err := xmlMarshal(input)
if err != nil {
return err
}
headers := c.getStandardHeaders()
headers["Content-Length"] = strconv.Itoa(length)
resp, err := c.exec(http.MethodPut, uri, headers, body, auth)
if err != nil {
return err
}
defer drainRespBody(resp)
return checkRespCode(resp, []int{http.StatusAccepted})
}

View File

@@ -1,423 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
const (
	tablesURIPath                  = "/Tables"
	nextTableQueryParameter        = "NextTableName"
	headerNextPartitionKey         = "x-ms-continuation-NextPartitionKey"
	headerNextRowKey               = "x-ms-continuation-NextRowKey"
	nextPartitionKeyQueryParameter = "NextPartitionKey"
	nextRowKeyQueryParameter       = "NextRowKey"
)

// TableAccessPolicy are used for SETTING table policies
type TableAccessPolicy struct {
	ID         string
	StartTime  time.Time
	ExpiryTime time.Time
	CanRead    bool
	CanAppend  bool
	CanUpdate  bool
	CanDelete  bool
}

// Table represents an Azure table.
type Table struct {
	tsc           *TableServiceClient
	Name          string `json:"TableName"`
	OdataEditLink string `json:"odata.editLink"`
	OdataID       string `json:"odata.id"`
	OdataMetadata string `json:"odata.metadata"`
	OdataType     string `json:"odata.type"`
}

// EntityQueryResult contains the response from
// ExecuteQuery and ExecuteQueryNextResults functions.
type EntityQueryResult struct {
	OdataMetadata string    `json:"odata.metadata"`
	Entities      []*Entity `json:"value"`
	QueryNextLink
	table *Table
}

// continuationToken holds the pagination markers the table service returns
// in the x-ms-continuation-* response headers.
type continuationToken struct {
	NextPartitionKey string
	NextRowKey       string
}

// buildPath returns the URI path addressing this table's entities, e.g. "/MyTable".
func (t *Table) buildPath() string {
	return fmt.Sprintf("/%s", t.Name)
}

// buildSpecificPath returns the path addressing the table itself inside the
// Tables collection, e.g. "/Tables('MyTable')".
func (t *Table) buildSpecificPath() string {
	return fmt.Sprintf("%s('%s')", tablesURIPath, t.Name)
}
// Get gets the referenced table.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities
//
// timeout is the server-side timeout in seconds; ml selects the OData
// metadata level and must not be EmptyPayload.
func (t *Table) Get(timeout uint, ml MetadataLevel) error {
	if ml == EmptyPayload {
		return errEmptyPayload
	}

	query := url.Values{
		"timeout": {strconv.FormatUint(uint64(timeout), 10)},
	}
	headers := t.tsc.client.getStandardHeaders()
	headers[headerAccept] = string(ml)

	uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), query)
	resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return err
	}

	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	// Decode into the receiver so its Odata* fields are refreshed in place.
	err = json.Unmarshal(respBody, t)
	if err != nil {
		return err
	}
	return nil
}

// Create creates the referenced table.
// This function fails if the name is not compliant
// with the specification or the tables already exists.
// ml determines the level of detail of metadata in the operation response,
// or no data at all.
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/create-table
func (t *Table) Create(timeout uint, ml MetadataLevel, options *TableOptions) error {
	uri := t.tsc.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{
		"timeout": {strconv.FormatUint(uint64(timeout), 10)},
	})

	type createTableRequest struct {
		TableName string `json:"TableName"`
	}
	req := createTableRequest{TableName: t.Name}
	buf := new(bytes.Buffer)
	if err := json.NewEncoder(buf).Encode(req); err != nil {
		return err
	}

	headers := t.tsc.client.getStandardHeaders()
	headers = addReturnContentHeaders(headers, ml)
	headers = addBodyRelatedHeaders(headers, buf.Len())
	headers = options.addToHeaders(headers)

	resp, err := t.tsc.client.exec(http.MethodPost, uri, headers, buf, t.tsc.auth)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// 204 when no content was requested, 201 when an echo was requested.
	if ml == EmptyPayload {
		if err := checkRespCode(resp, []int{http.StatusNoContent}); err != nil {
			return err
		}
	} else {
		if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil {
			return err
		}
	}

	if ml != EmptyPayload {
		data, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return err
		}
		err = json.Unmarshal(data, t)
		if err != nil {
			return err
		}
	}

	return nil
}

// Delete deletes the referenced table.
// This function fails if the table is not present.
// Be advised: Delete deletes all the entries that may be present.
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/delete-table
func (t *Table) Delete(timeout uint, options *TableOptions) error {
	uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), url.Values{
		"timeout": {strconv.Itoa(int(timeout))},
	})

	headers := t.tsc.client.getStandardHeaders()
	headers = addReturnContentHeaders(headers, EmptyPayload)
	headers = options.addToHeaders(headers)

	resp, err := t.tsc.client.exec(http.MethodDelete, uri, headers, nil, t.tsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	return checkRespCode(resp, []int{http.StatusNoContent})
}

// QueryOptions includes options for a query entities operation.
// Top, filter and select are OData query options.
type QueryOptions struct {
	Top       uint
	Filter    string
	Select    []string
	RequestID string
}

// getParameters translates the options into OData query parameters
// ($top, $filter, $select) and request headers. Safe on a nil receiver.
func (options *QueryOptions) getParameters() (url.Values, map[string]string) {
	query := url.Values{}
	headers := map[string]string{}
	if options != nil {
		if options.Top > 0 {
			query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10))
		}
		if options.Filter != "" {
			query.Add(OdataFilter, options.Filter)
		}
		if len(options.Select) > 0 {
			query.Add(OdataSelect, strings.Join(options.Select, ","))
		}
		headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
	}
	return query, headers
}

// QueryEntities returns the entities in the table.
// You can use query options defined by the OData Protocol specification.
//
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
func (t *Table) QueryEntities(timeout uint, ml MetadataLevel, options *QueryOptions) (*EntityQueryResult, error) {
	if ml == EmptyPayload {
		return nil, errEmptyPayload
	}
	query, headers := options.getParameters()
	query = addTimeout(query, timeout)
	uri := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), query)
	return t.queryEntities(uri, headers, ml)
}
// NextResults returns the next page of results
// from a QueryEntities or NextResults operation.
//
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination
func (eqr *EntityQueryResult) NextResults(options *TableOptions) (*EntityQueryResult, error) {
	if eqr == nil {
		return nil, errNilPreviousResult
	}
	if eqr.NextLink == nil {
		return nil, errNilNextLink
	}
	headers := options.addToHeaders(map[string]string{})
	return eqr.table.queryEntities(*eqr.NextLink, headers, eqr.ml)
}

// SetPermissions sets up table ACL permissions
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/Set-Table-ACL
func (t *Table) SetPermissions(tap []TableAccessPolicy, timeout uint, options *TableOptions) error {
	params := url.Values{"comp": {"acl"},
		"timeout": {strconv.Itoa(int(timeout))},
	}

	uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params)
	headers := t.tsc.client.getStandardHeaders()
	headers = options.addToHeaders(headers)

	body, length, err := generateTableACLPayload(tap)
	if err != nil {
		return err
	}
	headers["Content-Length"] = strconv.Itoa(length)

	resp, err := t.tsc.client.exec(http.MethodPut, uri, headers, body, t.tsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp)

	return checkRespCode(resp, []int{http.StatusNoContent})
}

// generateTableACLPayload serializes the access policies into the XML
// SignedIdentifiers document the service expects, returning the reader
// and its byte length (for the Content-Length header).
func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) {
	sil := SignedIdentifiers{
		SignedIdentifiers: []SignedIdentifier{},
	}
	for _, tap := range policies {
		permission := generateTablePermissions(&tap)
		signedIdentifier := convertAccessPolicyToXMLStructs(tap.ID, tap.StartTime, tap.ExpiryTime, permission)
		sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
	}
	return xmlMarshal(sil)
}

// GetPermissions gets the table ACL permissions
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/get-table-acl
func (t *Table) GetPermissions(timeout int, options *TableOptions) ([]TableAccessPolicy, error) {
	params := url.Values{"comp": {"acl"},
		"timeout": {strconv.Itoa(int(timeout))},
	}

	uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params)
	headers := t.tsc.client.getStandardHeaders()
	headers = options.addToHeaders(headers)

	resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return nil, err
	}

	var ap AccessPolicy
	err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList)
	if err != nil {
		return nil, err
	}
	return updateTableAccessPolicy(ap), nil
}

// queryEntities performs the GET behind QueryEntities/NextResults, decodes
// the JSON entity list, wires each entity back to this table, and derives
// NextLink from the continuation headers when more pages are available.
func (t *Table) queryEntities(uri string, headers map[string]string, ml MetadataLevel) (*EntityQueryResult, error) {
	headers = mergeHeaders(headers, t.tsc.client.getStandardHeaders())
	if ml != EmptyPayload {
		headers[headerAccept] = string(ml)
	}

	resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return nil, err
	}

	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	var entities EntityQueryResult
	err = json.Unmarshal(data, &entities)
	if err != nil {
		return nil, err
	}

	for i := range entities.Entities {
		entities.Entities[i].Table = t
	}
	entities.table = t

	contToken := extractContinuationTokenFromHeaders(resp.Header)
	if contToken == nil {
		entities.NextLink = nil
	} else {
		// Rebuild the request URI with the continuation markers so the
		// next page can be fetched with the same query options.
		originalURI, err := url.Parse(uri)
		if err != nil {
			return nil, err
		}
		v := originalURI.Query()
		if contToken.NextPartitionKey != "" {
			v.Set(nextPartitionKeyQueryParameter, contToken.NextPartitionKey)
		}
		if contToken.NextRowKey != "" {
			v.Set(nextRowKeyQueryParameter, contToken.NextRowKey)
		}
		newURI := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), v)
		entities.NextLink = &newURI
		entities.ml = ml
	}

	return &entities, nil
}
// extractContinuationTokenFromHeaders reads the x-ms-continuation-* headers
// and returns a token when at least one marker is present, nil otherwise.
func extractContinuationTokenFromHeaders(h http.Header) *continuationToken {
	token := continuationToken{
		NextPartitionKey: h.Get(headerNextPartitionKey),
		NextRowKey:       h.Get(headerNextRowKey),
	}
	if token.NextPartitionKey == "" && token.NextRowKey == "" {
		return nil
	}
	return &token
}
// updateTableAccessPolicy converts the XML SignedIdentifiers returned by
// the service into TableAccessPolicy values, decoding the "raud"
// permission string into individual bool flags.
func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy {
	taps := []TableAccessPolicy{}
	for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
		tap := TableAccessPolicy{
			ID:         policy.ID,
			StartTime:  policy.AccessPolicy.StartTime,
			ExpiryTime: policy.AccessPolicy.ExpiryTime,
		}
		tap.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
		tap.CanAppend = updatePermissions(policy.AccessPolicy.Permission, "a")
		tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
		tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")

		taps = append(taps, tap)
	}
	return taps
}
// generateTablePermissions encodes the policy's bool flags as the canonical
// "raud" permission string expected by the Set-Table-ACL payload.
// still want the end user API to have bool flags.
func generateTablePermissions(tap *TableAccessPolicy) (permissions string) {
	flags := []struct {
		enabled bool
		code    string
	}{
		{tap.CanRead, "r"},
		{tap.CanAppend, "a"},
		{tap.CanUpdate, "u"},
		{tap.CanDelete, "d"},
	}
	for _, flag := range flags {
		if flag.enabled {
			permissions += flag.code
		}
	}
	return permissions
}

View File

@@ -1,325 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"mime/multipart"
"net/http"
"net/textproto"
"sort"
"strings"
)
// Operation type. Insert, Delete, Replace etc.
type Operation int

// consts for batch operations.
const (
	InsertOp          = Operation(1)
	DeleteOp          = Operation(2)
	ReplaceOp         = Operation(3)
	MergeOp           = Operation(4)
	InsertOrReplaceOp = Operation(5)
	InsertOrMergeOp   = Operation(6)
)

// BatchEntity used for tracking Entities to operate on and
// whether operations (replace/merge etc) should be forced.
// Wrapper for regular Entity with additional data specific for the entity.
type BatchEntity struct {
	*Entity
	Force bool // when true, the change is applied regardless of the entity's ETag
	Op    Operation
}

// TableBatch stores all the entities that will be operated on during a batch process.
// Entities can be inserted, replaced or deleted.
type TableBatch struct {
	BatchEntitySlice []BatchEntity

	// reference to table we're operating on.
	Table *Table
}

// defaultChangesetHeaders for changeSets
var defaultChangesetHeaders = map[string]string{
	"Accept":       "application/json;odata=minimalmetadata",
	"Content-Type": "application/json",
	"Prefer":       "return-no-content",
}

// NewBatch return new TableBatch for populating.
func (t *Table) NewBatch() *TableBatch {
	return &TableBatch{
		Table: t,
	}
}

// InsertEntity adds an entity in preparation for a batch insert.
func (t *TableBatch) InsertEntity(entity *Entity) {
	be := BatchEntity{Entity: entity, Force: false, Op: InsertOp}
	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
}
// InsertOrReplaceEntity adds an entity in preparation for a batch insert or replace.
// force requests the operation be applied regardless of the entity's ETag.
func (t *TableBatch) InsertOrReplaceEntity(entity *Entity, force bool) {
	// Bug fix: the force argument was previously discarded (Force was
	// hard-coded to false), so InsertOrReplaceEntityByForce had no effect.
	be := BatchEntity{Entity: entity, Force: force, Op: InsertOrReplaceOp}
	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
}
// InsertOrReplaceEntityByForce adds an entity in preparation for a batch insert or replace. Forces regardless of ETag
func (t *TableBatch) InsertOrReplaceEntityByForce(entity *Entity) {
	t.InsertOrReplaceEntity(entity, true)
}
// InsertOrMergeEntity adds an entity in preparation for a batch insert or merge.
// force requests the operation be applied regardless of the entity's ETag.
func (t *TableBatch) InsertOrMergeEntity(entity *Entity, force bool) {
	// Bug fix: the force argument was previously discarded (Force was
	// hard-coded to false), so InsertOrMergeEntityByForce had no effect.
	be := BatchEntity{Entity: entity, Force: force, Op: InsertOrMergeOp}
	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
}
// InsertOrMergeEntityByForce adds an entity in preparation for a batch insert or merge. Forces regardless of ETag
func (t *TableBatch) InsertOrMergeEntityByForce(entity *Entity) {
	t.InsertOrMergeEntity(entity, true)
}

// ReplaceEntity adds an entity in preparation for a batch replace.
// The replace is conditional on the entity's ETag (If-Match) unless the
// entity has no ETag recorded.
func (t *TableBatch) ReplaceEntity(entity *Entity) {
	be := BatchEntity{Entity: entity, Force: false, Op: ReplaceOp}
	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
}
// DeleteEntity adds an entity in preparation for a batch delete.
// force requests the delete be applied regardless of the entity's ETag
// (If-Match: * instead of the entity's current ETag).
func (t *TableBatch) DeleteEntity(entity *Entity, force bool) {
	// Bug fix: the force argument was previously discarded (Force was
	// hard-coded to false), so forced deletes still sent a conditional
	// If-Match header and could fail on a stale ETag.
	be := BatchEntity{Entity: entity, Force: force, Op: DeleteOp}
	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
}
// DeleteEntityByForce adds an entity in preparation for a batch delete. Forces regardless of ETag
// NOTE(review): the force parameter is unused; true is always forwarded.
// The signature is kept for backward compatibility.
func (t *TableBatch) DeleteEntityByForce(entity *Entity, force bool) {
	t.DeleteEntity(entity, true)
}

// MergeEntity adds an entity in preparation for a batch merge
func (t *TableBatch) MergeEntity(entity *Entity) {
	be := BatchEntity{Entity: entity, Force: false, Op: MergeOp}
	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
}
// ExecuteBatch executes many table operations in one request to Azure.
// The operations can be combinations of Insert, Delete, Replace and Merge
// Creates the inner changeset body (various operations, Insert, Delete etc) then creates the outer request packet that encompasses
// the changesets.
// As per document https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/performing-entity-group-transactions
func (t *TableBatch) ExecuteBatch() error {
	// The outer batch and the inner changeset each need their own unique
	// MIME boundary.
	id, err := newUUID()
	if err != nil {
		return err
	}

	changesetBoundary := fmt.Sprintf("changeset_%s", id.String())
	uri := t.Table.tsc.client.getEndpoint(tableServiceName, "$batch", nil)
	changesetBody, err := t.generateChangesetBody(changesetBoundary)
	if err != nil {
		return err
	}

	id, err = newUUID()
	if err != nil {
		return err
	}

	boundary := fmt.Sprintf("batch_%s", id.String())
	body, err := generateBody(changesetBody, changesetBoundary, boundary)
	if err != nil {
		return err
	}

	headers := t.Table.tsc.client.getStandardHeaders()
	headers[headerContentType] = fmt.Sprintf("multipart/mixed; boundary=%s", boundary)

	resp, err := t.Table.tsc.client.execBatchOperationJSON(http.MethodPost, uri, headers, bytes.NewReader(body.Bytes()), t.Table.tsc.auth)
	if err != nil {
		return err
	}
	defer drainRespBody(resp.resp)

	if err = checkRespCode(resp.resp, []int{http.StatusAccepted}); err != nil {

		// check which batch failed.
		operationFailedMessage := t.getFailedOperation(resp.odata.Err.Message.Value)
		requestID, date, version := getDebugHeaders(resp.resp.Header)
		return AzureStorageServiceError{
			StatusCode: resp.resp.StatusCode,
			Code:       resp.odata.Err.Code,
			RequestID:  requestID,
			Date:       date,
			APIVersion: version,
			Message:    operationFailedMessage,
		}
	}

	return nil
}
// getFailedOperation parses the original Azure error string and determines which operation failed
// and generates appropriate message.
// The service reports the failing element as "index:description"; only the
// index before the first colon is needed.
func (t *TableBatch) getFailedOperation(errorMessage string) string {
	index, _, found := strings.Cut(errorMessage, ":")
	if !found {
		// cant parse the message, just return the original message to client
		return errorMessage
	}
	return fmt.Sprintf("Element %s in the batch returned an unexpected response code.\n%s", index, errorMessage)
}
// generateBody generates the complete multipart body for the batch request:
// a single "multipart/mixed" part whose payload is the already-rendered
// changeset body, wrapped with the outer batch boundary.
func generateBody(changeSetBody *bytes.Buffer, changesetBoundary string, boundary string) (*bytes.Buffer, error) {
	body := new(bytes.Buffer)
	writer := multipart.NewWriter(body)
	// Bug fix: SetBoundary, Write and Close errors were previously ignored;
	// an invalid boundary would have produced a silently corrupt payload.
	if err := writer.SetBoundary(boundary); err != nil {
		return nil, err
	}

	h := make(textproto.MIMEHeader)
	h.Set(headerContentType, fmt.Sprintf("multipart/mixed; boundary=%s\r\n", changesetBoundary))
	batchWriter, err := writer.CreatePart(h)
	if err != nil {
		return nil, err
	}
	if _, err := batchWriter.Write(changeSetBody.Bytes()); err != nil {
		return nil, err
	}
	if err := writer.Close(); err != nil {
		return nil, err
	}
	return body, nil
}
// generateChangesetBody generates the individual changesets for the various operations within the batch request.
// There is a changeset for Insert, Delete, Merge etc.
func (t *TableBatch) generateChangesetBody(changesetBoundary string) (*bytes.Buffer, error) {
	body := new(bytes.Buffer)
	writer := multipart.NewWriter(body)
	// Bug fix: SetBoundary, generateEntitySubset and Close errors were
	// previously ignored, so a failing entity serialization produced a
	// truncated changeset instead of an error.
	if err := writer.SetBoundary(changesetBoundary); err != nil {
		return nil, err
	}

	for _, be := range t.BatchEntitySlice {
		if err := t.generateEntitySubset(&be, writer); err != nil {
			return nil, err
		}
	}

	if err := writer.Close(); err != nil {
		return nil, err
	}
	return body, nil
}
// generateVerb generates the HTTP request VERB required for each changeset.
// Merge operations use the non-standard "MERGE" verb defined by the table
// service protocol.
func generateVerb(op Operation) (string, error) {
	verbs := map[Operation]string{
		InsertOp:          http.MethodPost,
		DeleteOp:          http.MethodDelete,
		ReplaceOp:         http.MethodPut,
		InsertOrReplaceOp: http.MethodPut,
		MergeOp:           "MERGE",
		InsertOrMergeOp:   "MERGE",
	}
	verb, ok := verbs[op]
	if !ok {
		return "", errors.New("Unable to detect operation")
	}
	return verb, nil
}
// generateQueryPath generates the query path for within the changesets
// For inserts it will just be a table query path (table name)
// but for other operations (modifying an existing entity) then
// the partition/row keys need to be generated.
func (t *TableBatch) generateQueryPath(op Operation, entity *Entity) string {
	if op == InsertOp {
		return entity.Table.buildPath()
	}
	return entity.buildPath()
}

// generateGenericOperationHeaders generates common headers for a given operation.
// Only Delete, Replace and Merge are conditional requests: for those an
// If-Match header is emitted — "*" when forced or no ETag is known,
// otherwise the entity's current ETag.
func generateGenericOperationHeaders(be *BatchEntity) map[string]string {
	retval := map[string]string{}

	for k, v := range defaultChangesetHeaders {
		retval[k] = v
	}

	if be.Op == DeleteOp || be.Op == ReplaceOp || be.Op == MergeOp {
		if be.Force || be.Entity.OdataEtag == "" {
			retval["If-Match"] = "*"
		} else {
			retval["If-Match"] = be.Entity.OdataEtag
		}
	}

	return retval
}

// generateEntitySubset generates body payload for particular batch entity
// It writes one "application/http" part: the request line, the operation
// headers, a blank line, and (except for deletes) the JSON-encoded entity.
// NOTE(review): the individual Write errors are ignored; this is harmless
// only while the multipart writer is backed by a bytes.Buffer.
func (t *TableBatch) generateEntitySubset(batchEntity *BatchEntity, writer *multipart.Writer) error {

	h := make(textproto.MIMEHeader)
	h.Set(headerContentType, "application/http")
	h.Set(headerContentTransferEncoding, "binary")

	verb, err := generateVerb(batchEntity.Op)
	if err != nil {
		return err
	}

	genericOpHeadersMap := generateGenericOperationHeaders(batchEntity)
	queryPath := t.generateQueryPath(batchEntity.Op, batchEntity.Entity)
	uri := t.Table.tsc.client.getEndpoint(tableServiceName, queryPath, nil)

	operationWriter, err := writer.CreatePart(h)
	if err != nil {
		return err
	}

	urlAndVerb := fmt.Sprintf("%s %s HTTP/1.1\r\n", verb, uri)
	operationWriter.Write([]byte(urlAndVerb))
	writeHeaders(genericOpHeadersMap, &operationWriter)
	operationWriter.Write([]byte("\r\n")) // additional \r\n is needed per changeset separating the "headers" and the body.

	// delete operation doesn't need a body.
	if batchEntity.Op != DeleteOp {
		//var e Entity = batchEntity.Entity
		body, err := json.Marshal(batchEntity.Entity)
		if err != nil {
			return err
		}
		operationWriter.Write(body)
	}

	return nil
}
func writeHeaders(h map[string]string, writer *io.Writer) {
// This way it is guaranteed the headers will be written in a sorted order
var keys []string
for k := range h {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
(*writer).Write([]byte(fmt.Sprintf("%s: %s\r\n", k, h[k])))
}
}

View File

@@ -1,204 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strconv"
)
const (
	headerAccept          = "Accept"
	headerEtag            = "Etag"
	headerPrefer          = "Prefer"
	headerXmsContinuation = "x-ms-Continuation-NextTableName"
)

// TableServiceClient contains operations for Microsoft Azure Table Storage
// Service.
type TableServiceClient struct {
	client Client
	auth   authentication
}

// TableOptions includes options for some table operations
type TableOptions struct {
	RequestID string
}

// addToHeaders copies the client request ID (when set) into h and returns
// h. Safe to call on a nil receiver so callers may pass through nil options.
func (options *TableOptions) addToHeaders(h map[string]string) map[string]string {
	if options != nil {
		h = addToHeaders(h, "x-ms-client-request-id", options.RequestID)
	}
	return h
}

// QueryNextLink includes information for getting the next page of
// results in query operations
type QueryNextLink struct {
	NextLink *string
	ml       MetadataLevel
}

// GetServiceProperties gets the properties of your storage account's table service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties
func (t *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) {
	return t.client.getServiceProperties(tableServiceName, t.auth)
}

// SetServiceProperties sets the properties of your storage account's table service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties
func (t *TableServiceClient) SetServiceProperties(props ServiceProperties) error {
	return t.client.setServiceProperties(props, tableServiceName, t.auth)
}

// GetTableReference returns a Table object for the specified table name.
func (t *TableServiceClient) GetTableReference(name string) *Table {
	return &Table{
		tsc:  t,
		Name: name,
	}
}

// QueryTablesOptions includes options for some table operations
type QueryTablesOptions struct {
	Top       uint
	Filter    string
	RequestID string
}

// getParameters translates the options into OData query parameters
// ($top, $filter) and request headers. Safe on a nil receiver.
func (options *QueryTablesOptions) getParameters() (url.Values, map[string]string) {
	query := url.Values{}
	headers := map[string]string{}
	if options != nil {
		if options.Top > 0 {
			query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10))
		}
		if options.Filter != "" {
			query.Add(OdataFilter, options.Filter)
		}
		headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
	}
	return query, headers
}

// QueryTables returns the tables in the storage account.
// You can use query options defined by the OData Protocol specification.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-tables
func (t *TableServiceClient) QueryTables(ml MetadataLevel, options *QueryTablesOptions) (*TableQueryResult, error) {
	query, headers := options.getParameters()
	uri := t.client.getEndpoint(tableServiceName, tablesURIPath, query)
	return t.queryTables(uri, headers, ml)
}
// NextResults returns the next page of results
// from a QueryTables or a NextResults operation.
//
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-tables
// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination
func (tqr *TableQueryResult) NextResults(options *TableOptions) (*TableQueryResult, error) {
	if tqr == nil {
		return nil, errNilPreviousResult
	}
	if tqr.NextLink == nil {
		return nil, errNilNextLink
	}
	headers := options.addToHeaders(map[string]string{})
	return tqr.tsc.queryTables(*tqr.NextLink, headers, tqr.ml)
}

// TableQueryResult contains the response from
// QueryTables and QueryTablesNextResults functions.
type TableQueryResult struct {
	OdataMetadata string  `json:"odata.metadata"`
	Tables        []Table `json:"value"`
	QueryNextLink
	tsc *TableServiceClient
}

// queryTables performs the GET behind QueryTables/NextResults, decodes the
// JSON table list, wires each Table back to this client, and derives
// NextLink from the continuation header when more pages are available.
func (t *TableServiceClient) queryTables(uri string, headers map[string]string, ml MetadataLevel) (*TableQueryResult, error) {
	if ml == EmptyPayload {
		return nil, errEmptyPayload
	}
	headers = mergeHeaders(headers, t.client.getStandardHeaders())
	headers[headerAccept] = string(ml)

	resp, err := t.client.exec(http.MethodGet, uri, headers, nil, t.auth)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
		return nil, err
	}

	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	var out TableQueryResult
	err = json.Unmarshal(respBody, &out)
	if err != nil {
		return nil, err
	}

	for i := range out.Tables {
		out.Tables[i].tsc = t
	}
	out.tsc = t

	nextLink := resp.Header.Get(http.CanonicalHeaderKey(headerXmsContinuation))
	if nextLink == "" {
		out.NextLink = nil
	} else {
		// Rebuild the request URI with the continuation marker so the next
		// page can be fetched with the same query options.
		originalURI, err := url.Parse(uri)
		if err != nil {
			return nil, err
		}
		v := originalURI.Query()
		v.Set(nextTableQueryParameter, nextLink)
		newURI := t.client.getEndpoint(tableServiceName, tablesURIPath, v)
		out.NextLink = &newURI
		out.ml = ml
	}

	return &out, nil
}
// addBodyRelatedHeaders sets the Content-Type, Content-Length and
// Accept-Charset headers required when sending a JSON request body.
func addBodyRelatedHeaders(h map[string]string, length int) map[string]string {
	h[headerContentType] = "application/json"
	h[headerContentLength] = fmt.Sprintf("%v", length)
	h[headerAcceptCharset] = "UTF-8"
	return h
}

// addReturnContentHeaders sets Prefer/Accept according to the requested
// metadata level: ask the service to echo the entity back (return-content)
// when metadata is wanted, otherwise request no content.
func addReturnContentHeaders(h map[string]string, ml MetadataLevel) map[string]string {
	if ml != EmptyPayload {
		h[headerPrefer] = "return-content"
		h[headerAccept] = string(ml)
	} else {
		h[headerPrefer] = "return-no-content"
		// From API version 2015-12-11 onwards, Accept header is required
		h[headerAccept] = string(NoMetadata)
	}
	return h
}

View File

@@ -1,260 +0,0 @@
package storage
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bytes"
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"time"
uuid "github.com/satori/go.uuid"
)
var (
	// fixedTime is a reproducible reference instant; headersFromStruct also
	// uses its type for the time.Time case of its type switch.
	// NOTE(review): time.FixedZone takes an offset in seconds, so "GMT"/-6
	// is minus six SECONDS, not hours — confirm before reusing elsewhere.
	fixedTime = time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6))

	// accountSASOptions is a canned account SAS request: blob service, all
	// resource types and permissions, HTTPS only, expiring at fixedTime.
	accountSASOptions = AccountSASTokenOptions{
		Services: Services{
			Blob: true,
		},
		ResourceTypes: ResourceTypes{
			Service:   true,
			Container: true,
			Object:    true,
		},
		Permissions: Permissions{
			Read:    true,
			Write:   true,
			Delete:  true,
			List:    true,
			Add:     true,
			Create:  true,
			Update:  true,
			Process: true,
		},
		Expiry:   fixedTime,
		UseHTTPS: true,
	}
)

// computeHmac256 signs message with the account key using HMAC-SHA256 and
// returns the base64-encoded digest (used for SharedKey request signing).
func (c Client) computeHmac256(message string) string {
	h := hmac.New(sha256.New, c.accountKey)
	h.Write([]byte(message))
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}

// currentTimeRfc1123Formatted returns the current UTC time in the HTTP
// date format.
func currentTimeRfc1123Formatted() string {
	return timeRfc1123Formatted(time.Now().UTC())
}

// timeRfc1123Formatted formats t using http.TimeFormat (RFC 1123, GMT).
func timeRfc1123Formatted(t time.Time) string {
	return t.Format(http.TimeFormat)
}

// timeRFC3339Formatted formats t as RFC 3339 with seven fractional-second
// digits, the layout the service expects for snapshot timestamps.
func timeRFC3339Formatted(t time.Time) string {
	return t.Format("2006-01-02T15:04:05.0000000Z")
}
func mergeParams(v1, v2 url.Values) url.Values {
out := url.Values{}
for k, v := range v1 {
out[k] = v
}
for k, v := range v2 {
vals, ok := out[k]
if ok {
vals = append(vals, v...)
out[k] = vals
} else {
out[k] = v
}
}
return out
}
// prepareBlockListRequest renders the Put Block List XML payload, one
// element per block, tagged with the block's commit status.
func prepareBlockListRequest(blocks []Block) string {
	var sb strings.Builder
	sb.WriteString(`<?xml version="1.0" encoding="utf-8"?><BlockList>`)
	for _, block := range blocks {
		fmt.Fprintf(&sb, "<%s>%s</%s>", block.Status, block.ID, block.Status)
	}
	sb.WriteString(`</BlockList>`)
	return sb.String()
}
func xmlUnmarshal(body io.Reader, v interface{}) error {
data, err := ioutil.ReadAll(body)
if err != nil {
return err
}
return xml.Unmarshal(data, v)
}
func xmlMarshal(v interface{}) (io.Reader, int, error) {
b, err := xml.Marshal(v)
if err != nil {
return nil, 0, err
}
return bytes.NewReader(b), len(b), nil
}
// headersFromStruct builds a header map from a struct whose fields carry a
// `header:"..."` tag. Pointer fields are dereferenced; time.Time values are
// rendered as HTTP dates, uint/uint64 and int in decimal, everything else
// via reflect.Value.String. Empty values are omitted.
// NOTE(review): v must be a struct value (not a pointer) — reflect.ValueOf
// is used directly; passing anything else panics in NumField.
func headersFromStruct(v interface{}) map[string]string {
	headers := make(map[string]string)
	value := reflect.ValueOf(v)
	for i := 0; i < value.NumField(); i++ {
		key := value.Type().Field(i).Tag.Get("header")
		if key != "" {
			reflectedValue := reflect.Indirect(value.Field(i))
			var val string

			if reflectedValue.IsValid() {
				switch reflectedValue.Type() {
				case reflect.TypeOf(fixedTime):
					val = timeRfc1123Formatted(reflectedValue.Interface().(time.Time))
				case reflect.TypeOf(uint64(0)), reflect.TypeOf(uint(0)):
					val = strconv.FormatUint(reflectedValue.Uint(), 10)
				case reflect.TypeOf(int(0)):
					val = strconv.FormatInt(reflectedValue.Int(), 10)
				default:
					val = reflectedValue.String()
				}
			}

			if val != "" {
				headers[key] = val
			}
		}
	}
	return headers
}
// mergeHeaders copies every entry of extraHeaders into headers, overwriting
// any duplicate keys, and returns the mutated headers map.
func mergeHeaders(headers, extraHeaders map[string]string) map[string]string {
	for key, value := range extraHeaders {
		headers[key] = value
	}
	return headers
}
// addToHeaders sets h[key] = value unless value is empty, and returns h.
func addToHeaders(h map[string]string, key, value string) map[string]string {
	if value == "" {
		return h
	}
	h[key] = value
	return h
}
func addTimeToHeaders(h map[string]string, key string, value *time.Time) map[string]string {
if value != nil {
h = addToHeaders(h, key, timeRfc1123Formatted(*value))
}
return h
}
func addTimeout(params url.Values, timeout uint) url.Values {
if timeout > 0 {
params.Add("timeout", fmt.Sprintf("%v", timeout))
}
return params
}
func addSnapshot(params url.Values, snapshot *time.Time) url.Values {
if snapshot != nil {
params.Add("snapshot", timeRFC3339Formatted(*snapshot))
}
return params
}
func getTimeFromHeaders(h http.Header, key string) (*time.Time, error) {
var out time.Time
var err error
outStr := h.Get(key)
if outStr != "" {
out, err = time.Parse(time.RFC1123, outStr)
if err != nil {
return nil, err
}
}
return &out, nil
}
// TimeRFC1123 is an alias for time.Time needed for custom Unmarshalling
type TimeRFC1123 time.Time
// UnmarshalXML is a custom unmarshaller that overrides the default time unmarshal which uses a different time layout.
func (t *TimeRFC1123) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
var value string
d.DecodeElement(&value, &start)
parse, err := time.Parse(time.RFC1123, value)
if err != nil {
return err
}
*t = TimeRFC1123(parse)
return nil
}
// MarshalXML marshals using time.RFC1123.
func (t *TimeRFC1123) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return e.EncodeElement(time.Time(*t).Format(time.RFC1123), start)
}
// getMetadataFromHeaders extracts user-defined metadata from header: entries
// whose lowercased key starts with the user metadata prefix. Keys are
// returned lowercased with the prefix stripped; when a header repeats, the
// last value wins. Returns nil when no metadata headers are present.
//
// The manual lowercasing is deliberate: CanonicalHeaderKey cannot be trusted
// to normalize keys containing '_', which is a legal identifier character
// here (see CanonicalMIMEHeaderKey in net/textproto), so keys may arrive as
// either "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl".
func getMetadataFromHeaders(header http.Header) map[string]string {
	prefix := strings.ToLower(userDefinedMetadataHeaderPrefix)
	metadata := map[string]string{}
	for rawKey, values := range header {
		key := strings.ToLower(rawKey)
		if len(values) == 0 || !strings.HasPrefix(key, prefix) {
			continue
		}
		// e.g. metadata["lol"] = content of the last X-Ms-Meta-Lol header.
		metadata[strings.TrimPrefix(key, prefix)] = values[len(values)-1]
	}
	if len(metadata) == 0 {
		return nil
	}
	return metadata
}
// newUUID returns a freshly generated version-4 (random) UUID per RFC 4122.
func newUUID() (uuid.UUID, error) {
	var raw [16]byte
	// Fill all 16 bytes with (pseudo-)random data.
	if _, err := rand.Read(raw[:]); err != nil {
		return uuid.UUID{}, err
	}
	// Stamp the RFC 4122 variant bits into byte 8 (u.setVariant(ReservedRFC4122)).
	raw[8] = (raw[8]&(0xff>>2) | (0x02 << 6))
	// Stamp the version nibble into the high bits of byte 6 (u.setVersion(V4)).
	raw[6] = (raw[6] & 0xF) | (uuid.V4 << 4)
	return uuid.FromBytes(raw[:])
}

View File

@@ -1,21 +0,0 @@
package version
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// Number contains the semantic version of this SDK.
const Number = "v36.1.0"

View File

@@ -1,17 +0,0 @@
------------------------------------------ START OF LICENSE -----------------------------------------
Azure VHD Utilities for Go
Copyright (c) Microsoft Corporation
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------- END OF LICENSE ------------------------------------------

37
vendor/modules.txt vendored
View File

@@ -35,16 +35,10 @@ gitee.com/chunanyong/dm/util
# github.com/360EntSecGroup-Skylar/excelize v1.4.0
## explicit
github.com/360EntSecGroup-Skylar/excelize
# github.com/Azure/azure-sdk-for-go v36.1.0+incompatible => github.com/Azure/azure-sdk-for-go v36.1.0+incompatible
## explicit
github.com/Azure/azure-sdk-for-go/storage
github.com/Azure/azure-sdk-for-go/version
# github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161
## explicit; go 1.16
github.com/Azure/go-ansiterm
github.com/Azure/go-ansiterm/winterm
# github.com/Azure/go-autorest v14.2.0+incompatible
## explicit
# github.com/Azure/go-autorest/autorest v0.9.6
## explicit; go 1.12
github.com/Azure/go-autorest/autorest
@@ -88,21 +82,6 @@ github.com/DataDog/zstd
# github.com/LeeEirc/terminalparser v0.0.0-20240205084113-fbf78c8480f2
## explicit; go 1.15
github.com/LeeEirc/terminalparser
# github.com/Microsoft/azure-vhd-utils v0.0.0-20181115010904-44cbada2ece3
## explicit
github.com/Microsoft/azure-vhd-utils/vhdcore
github.com/Microsoft/azure-vhd-utils/vhdcore/bat
github.com/Microsoft/azure-vhd-utils/vhdcore/block
github.com/Microsoft/azure-vhd-utils/vhdcore/block/bitmap
github.com/Microsoft/azure-vhd-utils/vhdcore/common
github.com/Microsoft/azure-vhd-utils/vhdcore/diskstream
github.com/Microsoft/azure-vhd-utils/vhdcore/footer
github.com/Microsoft/azure-vhd-utils/vhdcore/header
github.com/Microsoft/azure-vhd-utils/vhdcore/header/parentlocator
github.com/Microsoft/azure-vhd-utils/vhdcore/reader
github.com/Microsoft/azure-vhd-utils/vhdcore/validator
github.com/Microsoft/azure-vhd-utils/vhdcore/vhdfile
github.com/Microsoft/azure-vhd-utils/vhdcore/writer
# github.com/Microsoft/go-winio v0.6.2
## explicit; go 1.21
github.com/Microsoft/go-winio
@@ -2264,7 +2243,7 @@ sigs.k8s.io/structured-merge-diff/v4/value
# sigs.k8s.io/yaml v1.2.0
## explicit; go 1.12
sigs.k8s.io/yaml
# yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20251127081629-9d8a6d1fe822
# yunion.io/x/cloudmux v0.3.10-0-alpha.1.0.20251130090640-988b91734be4
## explicit; go 1.24
yunion.io/x/cloudmux/pkg/apis
yunion.io/x/cloudmux/pkg/apis/billing
@@ -2282,6 +2261,19 @@ yunion.io/x/cloudmux/pkg/multicloud/azure
yunion.io/x/cloudmux/pkg/multicloud/azure/concurrent
yunion.io/x/cloudmux/pkg/multicloud/azure/progress
yunion.io/x/cloudmux/pkg/multicloud/azure/provider
yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore
yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/bat
yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/block
yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/block/bitmap
yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/common
yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/diskstream
yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/footer
yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/header
yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/header/parentlocator
yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/reader
yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/validator
yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/vhdfile
yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/writer
yunion.io/x/cloudmux/pkg/multicloud/baidu
yunion.io/x/cloudmux/pkg/multicloud/baidu/provider
yunion.io/x/cloudmux/pkg/multicloud/bingocloud
@@ -2434,5 +2426,4 @@ zombiezen.com/go/sqlite
zombiezen.com/go/sqlite/fs
zombiezen.com/go/sqlite/sqlitex
# github.com/influxdata/promql/v2 => github.com/zexi/promql/v2 v2.12.1
# github.com/Azure/azure-sdk-for-go => github.com/Azure/azure-sdk-for-go v36.1.0+incompatible
# github.com/docker/docker => github.com/docker/docker v20.10.27+incompatible

View File

@@ -1,10 +1,16 @@
package azure
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"strings"
"time"
@@ -22,6 +28,7 @@ const (
SERVICE_MANAGEMENT = "management"
SERVICE_GRAPH = "graph"
SERVICE_AAD = "aad"
SERVICE_STORAGE = "storage"
)
var azServices = map[string]map[string]string{
@@ -37,6 +44,10 @@ var azServices = map[string]map[string]string{
ENV_NAME_GLOBAL: "https://login.microsoftonline.com",
ENV_NAME_CHINA: "https://login.chinacloudapi.cn",
},
SERVICE_STORAGE: {
ENV_NAME_GLOBAL: "https://%s.blob.core.windows.net",
ENV_NAME_CHINA: "https://%s.blob.core.chinacloudapi.cn",
},
}
type Token struct {
@@ -145,6 +156,249 @@ func (self *SAzureClient) _post_v2(service string, resource, apiVersion string,
return self._request_v2(service, httputils.POST, resource, apiVersion, nil, body)
}
func (region *SRegion) list_storage_v2(accessKey, bucket string, container string, params url.Values, retVal interface{}) error {
return region.client.list_storage_v2(accessKey, bucket, container, params, retVal)
}
func (region *SRegion) put_storage_v2(accessKey, bucket string, container string, header http.Header, params url.Values, body io.Reader, retVal interface{}) error {
return region.client.put_storage_v2(accessKey, bucket, container, header, params, body, retVal)
}
func (self *SAzureClient) list_storage_v2(accessKey, bucket string, container string, params url.Values, retVal interface{}) error {
_, _, err := self.__storage_request(accessKey, bucket, container, httputils.GET, nil, params, nil, retVal)
if err != nil {
return errors.Wrapf(err, "list_storage_v2")
}
return nil
}
func (cli *SAzureClient) delete_storage_v2(accessKey, bucket string, container string, header http.Header, params url.Values) error {
_, _, err := cli.__storage_request(accessKey, bucket, container, httputils.DELETE, header, params, nil, nil)
if err != nil {
return errors.Wrapf(err, "delete_storage_v2")
}
return nil
}
func (cli *SAzureClient) put_storage_v2(accessKey, bucket string, container string, header http.Header, params url.Values, body io.Reader, retVal interface{}) error {
_, _, err := cli.__storage_request(accessKey, bucket, container, httputils.PUT, header, params, body, retVal)
if err != nil {
return errors.Wrapf(err, "put_storage_v2")
}
return nil
}
func (cli *SAzureClient) put_header_storage_v2(accessKey, bucket string, container string, header http.Header, params url.Values) (http.Header, error) {
header, _, err := cli.__storage_request(accessKey, bucket, container, httputils.PUT, header, params, nil, nil)
if err != nil {
return nil, errors.Wrapf(err, "put_header_storage_v2")
}
return header, nil
}
func (region *SRegion) get_header_storage_v2(accessKey, bucket string, container string, params url.Values) (http.Header, error) {
header, _, err := region.client.__storage_request(accessKey, bucket, container, httputils.GET, nil, params, nil, nil)
if err != nil {
return nil, errors.Wrapf(err, "get_header_storage_v2")
}
return header, nil
}
func (region *SRegion) get_body_storage_v2(accessKey, bucket string, container string, header http.Header, params url.Values) (io.Reader, error) {
_, body, err := region.client.__storage_request(accessKey, bucket, container, httputils.GET, header, params, nil, nil)
if err != nil {
return nil, errors.Wrapf(err, "get_body_storage_v2")
}
return body, nil
}
func (region *SRegion) header_storage_v2(accessKey, bucket string, container string, params url.Values) (http.Header, error) {
header, _, err := region.client.__storage_request(accessKey, bucket, container, httputils.HEAD, nil, params, nil, nil)
if err != nil {
return nil, errors.Wrapf(err, "header_storage_v2")
}
return header, nil
}
func computeHmac256(message string, accountKey string) (string, error) {
key, err := base64.StdEncoding.DecodeString(accountKey)
if err != nil {
return "", errors.Wrapf(err, "base64.StdEncoding.DecodeString(%s)", accountKey)
}
h := hmac.New(sha256.New, key)
h.Write([]byte(message))
return base64.StdEncoding.EncodeToString(h.Sum(nil)), nil
}
func buildCanonicalizedHeader(headers http.Header) string {
cm := make(map[string]string)
for k := range headers {
headerName := strings.TrimSpace(strings.ToLower(k))
if strings.HasPrefix(headerName, "x-ms-") {
cm[headerName] = headers.Get(k)
}
}
if len(cm) == 0 {
return ""
}
keys := []string{}
for key := range cm {
keys = append(keys, key)
}
sort.Strings(keys)
ch := bytes.NewBufferString("")
for _, key := range keys {
ch.WriteString(key)
ch.WriteRune(':')
ch.WriteString(cm[key])
ch.WriteRune('\n')
}
return strings.TrimSuffix(ch.String(), "\n")
}
func buildCanonicalizedString(verb httputils.THttpMethod, headers http.Header, canonicalizedResource string) string {
contentLength := headers.Get("Content-Length")
if contentLength == "0" {
contentLength = ""
}
date := ""
return strings.Join([]string{
strings.ToUpper(string(verb)),
headers.Get("Content-Encoding"),
headers.Get("Content-Language"),
contentLength,
headers.Get("Content-MD5"),
headers.Get("Content-Type"),
date,
headers.Get("If-Modified-Since"),
headers.Get("If-Match"),
headers.Get("If-None-Match"),
headers.Get("If-Unmodified-Since"),
headers.Get("Range"),
buildCanonicalizedHeader(headers),
canonicalizedResource,
}, "\n")
}
func buildCanonicalizedResource(bucket, uri string) (string, error) {
u, err := url.Parse(uri)
if err != nil {
return "", fmt.Errorf("url.Parse: %v", err)
}
cr := bytes.NewBufferString("")
cr.WriteString("/")
cr.WriteString(bucket)
if len(u.Path) > 0 {
// Any portion of the CanonicalizedResource string that is derived from
// the resource's URI should be encoded exactly as it is in the URI.
// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
cr.WriteString(u.EscapedPath())
}
params, err := url.ParseQuery(u.RawQuery)
if err != nil {
return "", fmt.Errorf("url.ParseQuery: %v", err)
}
// See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277
if len(params) > 0 {
cr.WriteString("\n")
keys := []string{}
for key := range params {
keys = append(keys, key)
}
sort.Strings(keys)
completeParams := []string{}
for _, key := range keys {
if len(params[key]) > 1 {
sort.Strings(params[key])
}
completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")))
}
cr.WriteString(strings.Join(completeParams, "\n"))
}
return cr.String(), nil
}
func (self *SAzureClient) __storage_request(accessKey, bucket string, container string, method httputils.THttpMethod, header http.Header, params url.Values, body io.Reader, retVal interface{}) (http.Header, io.Reader, error) {
if params == nil {
params = url.Values{}
}
domain := fmt.Sprintf(azServices[SERVICE_STORAGE][self.envName], bucket)
url := fmt.Sprintf("%s/%s", strings.TrimSuffix(domain, "/"), container)
if len(params) > 0 {
url += fmt.Sprintf("?%s", params.Encode())
}
utcTime := time.Now().UTC().Format(http.TimeFormat)
if gotypes.IsNil(header) {
header = http.Header{}
}
header.Set("x-ms-date", utcTime)
header.Set("x-ms-version", "2018-03-28")
canRes, err := buildCanonicalizedResource(bucket, url)
if err != nil {
return nil, nil, errors.Wrapf(err, "buildCanonicalizedResource")
}
canString := buildCanonicalizedString(method, header, canRes)
// 4. 计算 Shared Key 签名
signature, err := computeHmac256(canString, accessKey)
if err != nil {
return nil, nil, errors.Wrapf(err, "computeHmac256")
}
header.Set("Authorization", fmt.Sprintf("SharedKey %s:%s", bucket, signature))
resp, err := httputils.Request(self.client(), self.ctx, method, url, header, body, self.debug)
if err != nil {
return nil, nil, errors.Wrapf(err, "Request")
}
if resp.StatusCode >= 400 {
defer httputils.CloseResponse(resp)
data, err := io.ReadAll(resp.Body)
if err != nil {
return nil, nil, errors.Wrapf(err, "ReadAll")
}
return nil, nil, errors.Errorf("resp: %d url: %s header: %v, data: %s", resp.StatusCode, url, header, string(data))
}
if gotypes.IsNil(retVal) {
return resp.Header, resp.Body, nil
}
defer httputils.CloseResponse(resp)
data, err := io.ReadAll(resp.Body)
if err != nil {
return nil, nil, errors.Wrapf(err, "ReadAll")
}
err = xml.Unmarshal(data, retVal)
if err != nil {
return nil, nil, errors.Wrapf(err, "xml.Unmarshal")
}
return resp.Header, resp.Body, nil
}
func (self *SAzureClient) _request_v2(service string, method httputils.THttpMethod, resource, apiVersion string, params url.Values, body map[string]interface{}) (jsonutils.JSONObject, error) {
value := []jsonutils.JSONObject{}
if gotypes.IsNil(params) {

View File

@@ -16,12 +16,13 @@ package azure
import (
"context"
"fmt"
"net/http"
"net/url"
"strings"
"github.com/Azure/azure-sdk-for-go/storage"
"yunion.io/x/log"
"yunion.io/x/pkg/errors"
"yunion.io/x/pkg/utils"
"yunion.io/x/cloudmux/pkg/cloudprovider"
)
@@ -41,7 +42,7 @@ func (o *SObject) GetAcl() cloudprovider.TBucketACLType {
}
func (o *SObject) SetAcl(aclStr cloudprovider.TBucketACLType) error {
return nil
return cloudprovider.ErrNotSupported
}
func (o *SObject) getBlobName() string {
@@ -52,66 +53,94 @@ func (o *SObject) getBlobName() string {
}
}
func (o *SObject) getBlobRef() (*storage.Blob, error) {
blobName := o.getBlobName()
if len(blobName) == 0 {
return nil, nil
}
contRef, err := o.container.getContainerRef()
func (sa *SStorageAccount) GetObjectMeta(object string) (http.Header, error) {
accessKey, err := sa.GetAccountKey()
if err != nil {
return nil, errors.Wrap(err, "src getContainerRef")
return nil, errors.Wrap(err, "GetAccountKey")
}
blobRef := contRef.GetBlobReference(blobName)
return blobRef, nil
ret := http.Header{}
params := url.Values{}
header, err := sa.region.header_storage_v2(accessKey, sa.Name, object, params)
if err != nil {
return nil, errors.Wrap(err, "header_storage_v2")
}
for k := range header {
if strings.HasPrefix(strings.ToLower(k), "x-ms-meta-") {
ret.Add(strings.TrimPrefix(strings.ToLower(k), "x-ms-meta-"), header.Get(k))
}
if utils.IsInStringArray(k, []string{
http.CanonicalHeaderKey(cloudprovider.META_HEADER_CACHE_CONTROL),
http.CanonicalHeaderKey(cloudprovider.META_HEADER_CONTENT_TYPE),
http.CanonicalHeaderKey(cloudprovider.META_HEADER_CONTENT_DISPOSITION),
http.CanonicalHeaderKey(cloudprovider.META_HEADER_CONTENT_ENCODING),
http.CanonicalHeaderKey(cloudprovider.META_HEADER_CONTENT_LANGUAGE),
http.CanonicalHeaderKey(cloudprovider.META_HEADER_CONTENT_MD5),
}) {
ret.Set(k, header.Get(k))
}
}
return ret, nil
}
func (o *SObject) GetMeta() http.Header {
if o.Meta != nil {
return o.Meta
}
blobRef, err := o.getBlobRef()
objectName := fmt.Sprintf("%s/%s", o.container.Name, o.getBlobName())
var err error
meta, err := o.container.storageaccount.GetObjectMeta(objectName)
if err != nil {
log.Errorf("o.getBlobRef fail %s", err)
return nil
}
if blobRef == nil {
return nil
}
err = blobRef.GetMetadata(nil)
if err != nil {
log.Errorf("blobRef.GetMetadata fail %s", err)
}
err = blobRef.GetProperties(nil)
if err != nil {
log.Errorf("blobRef.GetProperties fail %s", err)
}
meta := getBlobRefMeta(blobRef)
o.Meta = meta
return o.Meta
return meta
}
func (o *SObject) SetMeta(ctx context.Context, meta http.Header) error {
blobRef, err := o.getBlobRef()
func (sa *SStorageAccount) SetObjectMeta(ctx context.Context, object string, meta http.Header) error {
accessKey, err := sa.GetAccountKey()
if err != nil {
return errors.Wrap(err, "o.getBlobRef")
return errors.Wrap(err, "GetAccountKey")
}
if blobRef == nil {
return cloudprovider.ErrNotSupported
}
propChanged, metaChanged := setBlobRefMeta(blobRef, meta)
if propChanged {
propOpts := storage.SetBlobPropertiesOptions{}
err := blobRef.SetProperties(&propOpts)
if err != nil {
return errors.Wrap(err, "blob.SetProperties")
properties := http.Header{}
metadata := http.Header{}
for k := range meta {
if utils.IsInStringArray(k, []string{
cloudprovider.META_HEADER_CACHE_CONTROL,
cloudprovider.META_HEADER_CONTENT_TYPE,
cloudprovider.META_HEADER_CONTENT_DISPOSITION,
cloudprovider.META_HEADER_CONTENT_ENCODING,
cloudprovider.META_HEADER_CONTENT_LANGUAGE,
cloudprovider.META_HEADER_CONTENT_MD5,
}) {
properties.Set(fmt.Sprintf("x-ms-blob-%s", strings.ToLower(k)), meta.Get(k))
} else {
metadata.Set(fmt.Sprintf("x-ms-meta-%s", k), meta.Get(k))
}
}
if metaChanged {
metaOpts := storage.SetBlobMetadataOptions{}
err := blobRef.SetMetadata(&metaOpts)
if len(properties) > 0 {
params := url.Values{}
params.Set("comp", "properties")
err = sa.region.client.put_storage_v2(accessKey, sa.Name, object, properties, params, nil, nil)
if err != nil {
return errors.Wrap(err, "blob.SetMetadata")
return errors.Wrap(err, "put_storage_v2 set properties")
}
}
if len(metadata) > 0 {
params := url.Values{}
params.Set("comp", "metadata")
err = sa.region.client.put_storage_v2(accessKey, sa.Name, object, metadata, params, nil, nil)
if err != nil {
return errors.Wrap(err, "put_storage_v2 set metadata")
}
}
return nil
}
func (o *SObject) SetMeta(ctx context.Context, meta http.Header) error {
fileName := fmt.Sprintf("%s/%s", o.container.Name, o.getBlobName())
return o.container.storageaccount.SetObjectMeta(ctx, fileName, meta)
}

File diff suppressed because it is too large Load Diff

View File

@@ -20,14 +20,15 @@ import (
"fmt"
"io"
"math"
"net/http"
"net/url"
"time"
"github.com/Azure/azure-sdk-for-go/storage"
"github.com/Microsoft/azure-vhd-utils/vhdcore/block/bitmap"
"github.com/Microsoft/azure-vhd-utils/vhdcore/common"
"github.com/Microsoft/azure-vhd-utils/vhdcore/diskstream"
"github.com/Microsoft/azure-vhd-utils/vhdcore/footer"
"github.com/Microsoft/azure-vhd-utils/vhdcore/validator"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/block/bitmap"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/common"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/diskstream"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/footer"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/validator"
"yunion.io/x/cloudmux/pkg/multicloud/azure/concurrent"
"yunion.io/x/cloudmux/pkg/multicloud/azure/progress"
@@ -44,25 +45,24 @@ type DataWithRange struct {
}
type DiskUploadContext struct {
VhdStream *diskstream.DiskStream // The stream whose ranges needs to be uploaded
AlreadyProcessedBytes int64 // The size in bytes already uploaded
UploadableRanges []*common.IndexRange // The subset of stream ranges to be uploaded
BlobServiceClient storage.BlobStorageClient // The client to make Azure blob service API calls
ContainerName string // The container in which page blob resides
BlobName string // The destination page blob name
Parallelism int // The number of concurrent goroutines to be used for upload
Resume bool // Indicate whether this is a new or resuming upload
MD5Hash []byte // MD5Hash to be set in the page blob properties once upload finishes
StorageAccount *SStorageAccount // The storage account to use for authentication
AccessKey string // The access key to use for authentication
VhdStream *diskstream.DiskStream // The stream whose ranges needs to be uploaded
AlreadyProcessedBytes int64 // The size in bytes already uploaded
UploadableRanges []*common.IndexRange // The subset of stream ranges to be uploaded
ContainerName string // The container in which page blob resides
BlobName string // The destination page blob name
Parallelism int // The number of concurrent goroutines to be used for upload
Resume bool // Indicate whether this is a new or resuming upload
MD5Hash []byte // MD5Hash to be set in the page blob properties once upload finishes
}
// oneMB is one MegaByte
//
const oneMB = float64(1048576)
// Upload uploads the disk ranges described by the parameter cxt, this parameter describes the disk stream to
// read from, the ranges of the stream to read, the destination blob and it's container, the client to communicate
// with Azure storage and the number of parallel go-routines to use for upload.
//
func Upload(cxt *DiskUploadContext, callback func(float32)) error {
// Get the channel that contains stream of disk data to upload
dataWithRangeChan, streamReadErrChan := GetDataWithRanges(cxt.VhdStream, cxt.UploadableRanges)
@@ -109,20 +109,23 @@ L:
}
// Create work request
//
containerClinet := cxt.BlobServiceClient.GetContainerReference(cxt.ContainerName)
blobClient := containerClinet.GetBlobReference(cxt.BlobName)
client := cxt.StorageAccount.region.client
req := &concurrent.Request{
Work: func() error {
err := blobClient.WriteRange(
storage.BlobRange{Start: uint64(dataWithRange.Range.Start), End: uint64(dataWithRange.Range.End)},
bytes.NewReader(dataWithRange.Data),
&storage.PutPageOptions{},
)
if err == nil {
uploadProgress.ReportBytesProcessedCount(dataWithRange.Range.Length())
params := url.Values{}
params.Set("comp", "page")
header := http.Header{}
header.Set("x-ms-blob-type", "PageBlob")
header.Set("x-ms-page-write", "update")
header.Set("x-ms-range", fmt.Sprintf("bytes=%d-%d", dataWithRange.Range.Start, dataWithRange.Range.End))
header.Set("Content-Length", fmt.Sprintf("%v", dataWithRange.Range.Length()))
file := fmt.Sprintf("%s/%s", cxt.ContainerName, cxt.BlobName)
err = client.put_storage_v2(cxt.AccessKey, cxt.StorageAccount.Name, file, header, params, bytes.NewReader(dataWithRange.Data), nil)
if err != nil {
return errors.New(err.Error())
}
return err
uploadProgress.ReportBytesProcessedCount(dataWithRange.Range.Length())
return nil
},
ShouldRetry: func(e error) bool {
return true
@@ -162,7 +165,6 @@ L:
// It returns two channels, a data channel to stream the disk ranges and a channel to send any error while reading
// the disk. On successful completion the data channel will be closed. the caller must not expect any more value in
// the data channel if the error channel is signaled.
//
func GetDataWithRanges(stream *diskstream.DiskStream, ranges []*common.IndexRange) (<-chan *DataWithRange, <-chan error) {
dataWithRangeChan := make(chan *DataWithRange, 0)
errorChan := make(chan error, 0)
@@ -191,7 +193,6 @@ func GetDataWithRanges(stream *diskstream.DiskStream, ranges []*common.IndexRang
// readAndPrintProgress reads the progress records from the given progress channel and output it. It reads the
// progress record until the channel is closed.
//
func readAndPrintProgress(progressChan <-chan *progress.Record, resume bool, callback func(float32)) {
var spinChars = [4]rune{'\\', '|', '/', '-'}
s := time.Time{}
@@ -326,7 +327,6 @@ func LocateNonEmptyRangeIndices(stream *diskstream.DiskStream, ranges []*common.
}
// isAllZero returns true if the given byte slice contain all zeros
//
func isAllZero(buf []byte) bool {
l := len(buf)
j := 0

View File

@@ -3,7 +3,7 @@ package bat
import (
"math"
"github.com/Microsoft/azure-vhd-utils/vhdcore"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore"
)
// BlockAllocationTable type represents the Block Allocation Table (BAT) of the disk, BAT served as
@@ -20,7 +20,6 @@ import (
// the 'block bitmap section'. Each bit in the bitmap indicates the state of the corresponding sector
// in 'data section', 1 indicates sector contains valid data, 0 indicates the sector have never been
// modified.
//
type BlockAllocationTable struct {
BATEntriesCount uint32
BAT []uint32
@@ -30,7 +29,6 @@ type BlockAllocationTable struct {
// NewBlockAllocationTable creates an instance of BlockAllocationTable, BAT is the block allocation table,
// each entry in this table is the absolute sector offset to a block, blockSize is the size of block's
// 'data section' in bytes.
//
func NewBlockAllocationTable(blockSize uint32, bat []uint32) *BlockAllocationTable {
return &BlockAllocationTable{BATEntriesCount: uint32(len(bat)), blockSize: blockSize, BAT: bat}
}
@@ -41,7 +39,6 @@ func NewBlockAllocationTable(blockSize uint32, bat []uint32) *BlockAllocationTab
// required to store the bitmap.
// As per vhd specification sectors per block must be power of two. The sector length is always 512 bytes.
// This means the block size will be power of two as well e.g. 512 * 2^3, 512 * 2^4, 512 * 2^5 etc..
//
func (b *BlockAllocationTable) GetBitmapSizeInBytes() int32 {
return int32(b.blockSize / uint32(vhdcore.VhdSectorLength) / 8)
}
@@ -58,20 +55,17 @@ func (b *BlockAllocationTable) GetSectorPaddedBitmapSizeInBytes() int32 {
// GetBitmapAddress returns the address of the 'block bitmap section' of a given block. Address is the
// absolute byte offset of the 'block bitmap section'. A block consists of 'block bitmap section' and
// 'data section'
//
func (b *BlockAllocationTable) GetBitmapAddress(blockIndex uint32) int64 {
return int64(b.BAT[blockIndex]) * vhdcore.VhdSectorLength
}
// GetBlockDataAddress returns the address of the 'data section' of a given block. Address is the absolute
// byte offset of the 'data section'. A block consists of 'block bitmap section' and 'data section'
//
func (b *BlockAllocationTable) GetBlockDataAddress(blockIndex uint32) int64 {
return b.GetBitmapAddress(blockIndex) + int64(b.GetSectorPaddedBitmapSizeInBytes())
}
// HasData returns true if the given block has not yet expanded hence contains no data.
//
func (b *BlockAllocationTable) HasData(blockIndex uint32) bool {
return blockIndex != vhdcore.VhdNoDataInt && b.BAT[blockIndex] != vhdcore.VhdNoDataInt
}

View File

@@ -1,13 +1,12 @@
package bat
import (
"github.com/Microsoft/azure-vhd-utils/vhdcore/header"
"github.com/Microsoft/azure-vhd-utils/vhdcore/reader"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/header"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/reader"
)
// BlockAllocationTableFactory type is used to create BlockAllocationTable instance by reading BAT
// section of the disk which follows the header
//
type BlockAllocationTableFactory struct {
vhdReader *reader.VhdReader
vhdHeader *header.Header
@@ -17,7 +16,6 @@ type BlockAllocationTableFactory struct {
// to create BlockAllocationTable instance by reading BAT section of the Vhd.
// vhdReader is the reader to be used to read the entry, vhdHeader is the header structure representing
// the disk header.
//
func NewBlockAllocationFactory(vhdReader *reader.VhdReader, vhdHeader *header.Header) *BlockAllocationTableFactory {
return &BlockAllocationTableFactory{
vhdReader: vhdReader,
@@ -27,7 +25,6 @@ func NewBlockAllocationFactory(vhdReader *reader.VhdReader, vhdHeader *header.He
// Create creates a BlockAllocationTable instance by reading the BAT section of the disk.
// This function return error if any error occurs while reading or parsing the BAT entries.
//
func (f *BlockAllocationTableFactory) Create() (*BlockAllocationTable, error) {
var err error
batEntriesCount := f.vhdHeader.MaxTableEntries

View File

@@ -1,12 +1,11 @@
package bitmap
import (
"github.com/Microsoft/azure-vhd-utils/vhdcore/bat"
"github.com/Microsoft/azure-vhd-utils/vhdcore/reader"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/bat"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/reader"
)
// Factory type is used to create BitMap instance by reading 'bitmap section' of a block.
//
type Factory struct {
vhdReader *reader.VhdReader
blockAllocationTable *bat.BlockAllocationTable
@@ -16,7 +15,6 @@ type Factory struct {
// the 'bitmap section' of a block. vhdReader is the reader to read the disk, blockAllocationTable wraps
// the disk's BAT table, which has one entry per block, this is used to retrieve the absolute offset to
// the beginning of the 'bitmap section' of a block and the size of the 'bitmap section'.
//
func NewFactory(vhdReader *reader.VhdReader, blockAllocationTable *bat.BlockAllocationTable) *Factory {
return &Factory{vhdReader: vhdReader, blockAllocationTable: blockAllocationTable}
}
@@ -24,7 +22,6 @@ func NewFactory(vhdReader *reader.VhdReader, blockAllocationTable *bat.BlockAllo
// Create creates a BitMap instance by reading block's 'bitmap section', block is the index of the
// block entry in the BAT whose 'bitmap section' needs to be read.
// This function return error if any error occurs while reading or parsing the block's bitmap.
//
func (f *Factory) Create(blockIndex uint32) (*BitMap, error) {
bitmapAbsoluteByteOffset := f.blockAllocationTable.GetBitmapAddress(blockIndex)
bitmapSizeInBytes := f.blockAllocationTable.GetBitmapSizeInBytes()

View File

@@ -3,15 +3,14 @@ package block
import (
"fmt"
"github.com/Microsoft/azure-vhd-utils/vhdcore"
"github.com/Microsoft/azure-vhd-utils/vhdcore/block/bitmap"
"github.com/Microsoft/azure-vhd-utils/vhdcore/common"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/block/bitmap"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/common"
)
// Block type represents Block of a vhd. A block of a dynamic or differential vhd starts with a
// 'bitmap' section followed by the 'data' section, in case of fixed vhd the entire block is used
// to store the 'data'.
//
type Block struct {
// BlockIndex is the index of the block, block indices are consecutive values starting from 0
// for the first block.
@@ -46,7 +45,6 @@ type Block struct {
// Data returns the block data, the content of entire block in case of fixed vhd and the content
// of block's data section in case of dynamic and differential vhd.
//
func (b *Block) Data() ([]byte, error) {
if b.blockData == nil {
var err error
@@ -60,20 +58,17 @@ func (b *Block) Data() ([]byte, error) {
// GetSector returns an instance of Sector representing a sector with the given Id in this block.
// The parameter sectorIndex is the index of the sector in this block to read.
//
func (b *Block) GetSector(sectorIndex uint32) (*Sector, error) {
return b.blockFactory.GetSector(b, sectorIndex)
}
// GetSectorCount returns the number of sectors in the block.
//
func (b *Block) GetSectorCount() int64 {
return b.LogicalRange.Length() / vhdcore.VhdSectorLength
}
// String returns formatted representation of the block
// This satisfies Stringer interface.
//
func (b *Block) String() string {
return fmt.Sprintf("Block:%d", b.BlockIndex)
}

View File

@@ -3,11 +3,10 @@ package block
import (
"fmt"
"github.com/Microsoft/azure-vhd-utils/vhdcore/footer"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/footer"
)
// DataReadError is the error type representing block data read error.
//
type DataReadError struct {
BlockIndex uint32
DiskType footer.DiskType
@@ -15,13 +14,11 @@ type DataReadError struct {
}
// Error returns the string representation of the BlockDataReadError instance.
//
func (e *DataReadError) Error() string {
return fmt.Sprintf("Error in Reading block '%d', DiskType - %s : %s", e.BlockIndex, e.DiskType, e.err)
}
// GetInnerErr returns the inner error, this method satisfies InnerErr interface
//
func (e *DataReadError) GetInnerErr() error {
return e.err
}
@@ -29,7 +26,6 @@ func (e *DataReadError) GetInnerErr() error {
// NewDataReadError returns a new DataReadError instance.
// The parameter blockIndex represents index of the block whose bitmap failed to parse
// The parameter err is the underlying error for parse failure.
//
func NewDataReadError(blockIndex uint32, diskType footer.DiskType, err error) error {
return &DataReadError{
BlockIndex: blockIndex,

View File

@@ -1,9 +1,9 @@
package block
import (
"github.com/Microsoft/azure-vhd-utils/vhdcore"
"github.com/Microsoft/azure-vhd-utils/vhdcore/block/bitmap"
"github.com/Microsoft/azure-vhd-utils/vhdcore/common"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/block/bitmap"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/common"
)
// DifferencingDiskBlockFactory is a type which is used for following purposes
@@ -12,7 +12,6 @@ import (
// To get the block size of the block in differencing disk
// To get a Sector instance representing sector of differencing disk's block
// To get the logical footer range of fixed disk generated from the differencing disk and it's parents.
//
type DifferencingDiskBlockFactory struct {
params *FactoryParams
bitmapFactory *bitmap.Factory
@@ -24,7 +23,6 @@ type DifferencingDiskBlockFactory struct {
// NewDifferencingDiskBlockFactory creates a DifferencingDiskBlockFactory instance which can be used to
// create a Block objects representing differential disk block of a size specified in header BlockSize
// field parameter vhdFile represents the differencing disk.
//
func NewDifferencingDiskBlockFactory(params *FactoryParams) *DifferencingDiskBlockFactory {
blockFactory := &DifferencingDiskBlockFactory{params: params}
@@ -43,20 +41,17 @@ func NewDifferencingDiskBlockFactory(params *FactoryParams) *DifferencingDiskBlo
}
// GetBlockCount returns the number of blocks in the differential disk.
//
func (f *DifferencingDiskBlockFactory) GetBlockCount() int64 {
return int64(f.params.BlockAllocationTable.BATEntriesCount)
}
// GetBlockSize returns the size of the 'data section' of block in bytes in the differential disk.
//
func (f *DifferencingDiskBlockFactory) GetBlockSize() int64 {
return int64(f.params.VhdHeader.BlockSize)
}
// GetFooterRange returns the logical range of the footer when converting this differential vhd to
// fixed logical range of footer is the absolute start and end byte offset of the footer.
//
func (f *DifferencingDiskBlockFactory) GetFooterRange() *common.IndexRange {
return common.NewIndexRangeFromLength(f.GetBlockCount()*f.GetBlockSize(), vhdcore.VhdFooterSize)
}
@@ -65,7 +60,6 @@ func (f *DifferencingDiskBlockFactory) GetFooterRange() *common.IndexRange {
// identifies the block. If the block to be read is marked as empty in the differencing disk BAT then this
// method will query parent disk for the same block. This function return error if the block cannot be created
// due to any read error.
//
func (f *DifferencingDiskBlockFactory) Create(blockIndex uint32) (*Block, error) {
if !f.params.BlockAllocationTable.HasData(blockIndex) {
if f.cachedBlock == nil || f.cachedBlock.BlockIndex != blockIndex {
@@ -104,7 +98,6 @@ func (f *DifferencingDiskBlockFactory) Create(blockIndex uint32) (*Block, error)
// read is marked as empty in the block's bitmap then this method will query parent disk for the same sector.
// This function return error if the sector cannot be created due to any read error or if the requested sector
// index is invalid.
//
func (f *DifferencingDiskBlockFactory) GetSector(block *Block, sectorIndex uint32) (*Sector, error) {
blockIndex := block.BlockIndex
if block.IsEmpty {
@@ -132,7 +125,6 @@ func (f *DifferencingDiskBlockFactory) GetSector(block *Block, sectorIndex uint3
// GetBitmapFactory returns an instance of BitmapFactory that can be used to create the bitmap of a block
// by reading block from differencing disk.
//
func (f *DifferencingDiskBlockFactory) GetBitmapFactory() *bitmap.Factory {
return f.bitmapFactory
}

View File

@@ -1,15 +1,14 @@
package block
import (
"github.com/Microsoft/azure-vhd-utils/vhdcore/bat"
"github.com/Microsoft/azure-vhd-utils/vhdcore/footer"
"github.com/Microsoft/azure-vhd-utils/vhdcore/reader"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/bat"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/footer"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/reader"
)
// DifferencingDiskBlockReader type satisfies BlockDataReader interface,
// implementation of BlockDataReader::Read by this type can read the 'data' section
// of a differencing disk's block.
//
type DifferencingDiskBlockReader struct {
vhdReader *reader.VhdReader
blockAllocationTable *bat.BlockAllocationTable
@@ -22,7 +21,6 @@ type DifferencingDiskBlockReader struct {
// The parameter vhdReader is the reader to read the disk
// The parameter blockAllocationTable represents the disk's BAT
// The parameter blockSizeInBytes is the size of the differencing disk block
//
func NewDifferencingDiskBlockReader(vhdReader *reader.VhdReader, blockAllocationTable *bat.BlockAllocationTable, blockSizeInBytes uint32) *DifferencingDiskBlockReader {
return &DifferencingDiskBlockReader{
vhdReader: vhdReader,
@@ -34,7 +32,6 @@ func NewDifferencingDiskBlockReader(vhdReader *reader.VhdReader, blockAllocation
// Read reads the data in a block of a differencing disk
// The parameter block represents the block whose 'data' section to read
//
func (r *DifferencingDiskBlockReader) Read(block *Block) ([]byte, error) {
blockIndex := block.BlockIndex
if !r.blockAllocationTable.HasData(blockIndex) {

View File

@@ -1,9 +1,9 @@
package block
import (
"github.com/Microsoft/azure-vhd-utils/vhdcore"
"github.com/Microsoft/azure-vhd-utils/vhdcore/block/bitmap"
"github.com/Microsoft/azure-vhd-utils/vhdcore/common"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/block/bitmap"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/common"
)
// DynamicDiskBlockFactory is a type which is used for following purposes
@@ -12,7 +12,6 @@ import (
// To get the block size of the block in dynamic disk
// To get a Sector instance representing sector of dynamic disk's block
// To get the logical footer range of fixed disk generated from the dynamic disk
//
type DynamicDiskBlockFactory struct {
params *FactoryParams
bitmapFactory *bitmap.Factory
@@ -24,7 +23,6 @@ type DynamicDiskBlockFactory struct {
// NewDynamicDiskFactory creates a DynamicDiskBlockFactory instance which can be used to create a
// Block objects representing dynamic disk block of a size specified in header BlockSize field
// parameter params contains header, footer, BAT of dynamic disk and reader to read the disk.
//
func NewDynamicDiskFactory(params *FactoryParams) *DynamicDiskBlockFactory {
blockFactory := &DynamicDiskBlockFactory{params: params}
@@ -42,27 +40,23 @@ func NewDynamicDiskFactory(params *FactoryParams) *DynamicDiskBlockFactory {
}
// GetBlockCount returns the number of blocks in the dynamic disk.
//
func (f *DynamicDiskBlockFactory) GetBlockCount() int64 {
return int64(f.params.BlockAllocationTable.BATEntriesCount)
}
// GetBlockSize returns the size of the 'data section' of block in bytes in the dynamic disk.
//
func (f *DynamicDiskBlockFactory) GetBlockSize() int64 {
return int64(f.params.VhdHeader.BlockSize)
}
// GetFooterRange returns the logical range of the footer when converting this dynamic vhd to fixed
// logical range of footer is the absolute start and end byte offset of the footer.
//
func (f *DynamicDiskBlockFactory) GetFooterRange() *common.IndexRange {
return common.NewIndexRangeFromLength(f.GetBlockCount()*f.GetBlockSize(), vhdcore.VhdFooterSize)
}
// Create returns an instance of Block which represents a dynamic disk block, the parameter blockIndex
// identifies the block. This function return error if the block cannot be created due to any read error.
//
func (f *DynamicDiskBlockFactory) Create(blockIndex uint32) (*Block, error) {
if f.cachedDynamicBlock == nil || f.cachedDynamicBlock.BlockIndex != blockIndex {
logicalRange := common.NewIndexRangeFromLength(int64(blockIndex)*f.GetBlockSize(), f.GetBlockSize())
@@ -93,7 +87,6 @@ func (f *DynamicDiskBlockFactory) Create(blockIndex uint32) (*Block, error) {
// GetSector returns an instance of Sector in a dynamic disk, parameter block object identifying the block
// containing the sector, the parameter sectorIndex identifies the sector in the block. This function return
// error if the sector cannot be created due to any read error or if the requested sector index is invalid.
//
func (f *DynamicDiskBlockFactory) GetSector(block *Block, sectorIndex uint32) (*Sector, error) {
blockIndex := block.BlockIndex
if block.IsEmpty {
@@ -105,7 +98,6 @@ func (f *DynamicDiskBlockFactory) GetSector(block *Block, sectorIndex uint32) (*
// GetBitmapFactory returns an instance of BitmapFactory that can be used to create the bitmap of a block
// by reading block from dynamic disk.
//
func (f *DynamicDiskBlockFactory) GetBitmapFactory() *bitmap.Factory {
return f.bitmapFactory
}

View File

@@ -3,15 +3,14 @@ package block
import (
"io"
"github.com/Microsoft/azure-vhd-utils/vhdcore/bat"
"github.com/Microsoft/azure-vhd-utils/vhdcore/footer"
"github.com/Microsoft/azure-vhd-utils/vhdcore/reader"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/bat"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/footer"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/reader"
)
// DynamicDiskBlockReader type satisfies BlockDataReader interface,
// implementation of BlockDataReader::Read by this type can read the 'data' section
// of a dynamic disk's block.
//
type DynamicDiskBlockReader struct {
vhdReader *reader.VhdReader
blockAllocationTable *bat.BlockAllocationTable
@@ -24,7 +23,6 @@ type DynamicDiskBlockReader struct {
// The parameter vhdReader is the reader to read the disk
// The parameter blockAllocationTable represents the disk's BAT
// The parameter blockSizeInBytes is the size of the dynamic disk block
//
func NewDynamicDiskBlockReader(vhdReader *reader.VhdReader, blockAllocationTable *bat.BlockAllocationTable, blockSizeInBytes uint32) *DynamicDiskBlockReader {
return &DynamicDiskBlockReader{
@@ -37,7 +35,6 @@ func NewDynamicDiskBlockReader(vhdReader *reader.VhdReader, blockAllocationTable
// Read reads the data in a block of a dynamic disk
// The parameter block represents the block whose 'data' section to read
//
func (r *DynamicDiskBlockReader) Read(block *Block) ([]byte, error) {
blockIndex := block.BlockIndex
if !r.blockAllocationTable.HasData(blockIndex) {

View File

@@ -1,12 +1,11 @@
package block
import (
"github.com/Microsoft/azure-vhd-utils/vhdcore/common"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/common"
)
// Factory interface that all block factories specific to disk type (fixed,
// dynamic, differencing) needs to satisfy.
//
type Factory interface {
GetBlockCount() int64
GetBlockSize() int64

View File

@@ -1,15 +1,14 @@
package block
import (
"github.com/Microsoft/azure-vhd-utils/vhdcore/bat"
"github.com/Microsoft/azure-vhd-utils/vhdcore/footer"
"github.com/Microsoft/azure-vhd-utils/vhdcore/header"
"github.com/Microsoft/azure-vhd-utils/vhdcore/reader"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/bat"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/footer"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/header"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/reader"
)
// FactoryParams represents type of the parameter for different disk block
// factories.
//
type FactoryParams struct {
VhdFooter *footer.Footer
VhdHeader *header.Header

View File

@@ -4,8 +4,8 @@ import (
"log"
"math"
"github.com/Microsoft/azure-vhd-utils/vhdcore"
"github.com/Microsoft/azure-vhd-utils/vhdcore/common"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/common"
)
// FixedDiskBlockFactory is a type which is used for following purposes
@@ -14,7 +14,6 @@ import (
// To get the block size of the block in fixed disk
// To get a Sector instance representing sector of fixed disk's block
// To get the logical footer range of the fixed disk
//
type FixedDiskBlockFactory struct {
params *FactoryParams
sectorFactory *SectorFactory
@@ -28,7 +27,6 @@ type FixedDiskBlockFactory struct {
// NewFixedDiskBlockFactoryWithDefaultBlockSize creates a FixedDiskBlockFactory instance which can
// be used to create a Block object representing fixed disk block of default size 512 KB.
// parameter params contains header, footer of the fixed disk and reader to read the disk.
//
func NewFixedDiskBlockFactoryWithDefaultBlockSize(params *FactoryParams) *FixedDiskBlockFactory {
return NewFixedDiskBlockFactory(params, vhdcore.VhdDefaultBlockSize)
}
@@ -37,7 +35,6 @@ func NewFixedDiskBlockFactoryWithDefaultBlockSize(params *FactoryParams) *FixedD
// Block objects representing fixed disk block of a specific size, parameter params contains header,
// footer of the fixed disk and reader to read the disk, parameter blockSize represents the size
// of blocks in the fixed disk
//
func NewFixedDiskBlockFactory(params *FactoryParams, blockSize int64) *FixedDiskBlockFactory {
blockFactory := &FixedDiskBlockFactory{params: params}
@@ -65,20 +62,17 @@ func NewFixedDiskBlockFactory(params *FactoryParams, blockSize int64) *FixedDisk
}
// GetBlockCount returns the number of blocks in the fixed disk.
//
func (f *FixedDiskBlockFactory) GetBlockCount() int64 {
return f.blockCount
}
// GetBlockSize returns the size of the block in bytes of the fixed disk.
//
func (f *FixedDiskBlockFactory) GetBlockSize() int64 {
return f.blockSize
}
// GetFooterRange returns the logical range of the footer of the fixed disk, logical range of footer
// is the absolute start and end byte offset of the footer.
//
func (f *FixedDiskBlockFactory) GetFooterRange() *common.IndexRange {
footerStartIndex := f.params.VhdReader.Size - vhdcore.VhdFooterSize
return common.NewIndexRangeFromLength(footerStartIndex, vhdcore.VhdFooterSize)
@@ -86,7 +80,6 @@ func (f *FixedDiskBlockFactory) GetFooterRange() *common.IndexRange {
// Create returns an instance of Block which represents a fixed disk block, the parameter blockIndex
// identifies the block.
//
func (f *FixedDiskBlockFactory) Create(blockIndex uint32) (*Block, error) {
if f.cachedFixedBlock == nil || f.cachedFixedBlock.BlockIndex != blockIndex {
var logicalRange *common.IndexRange
@@ -112,7 +105,6 @@ func (f *FixedDiskBlockFactory) Create(blockIndex uint32) (*Block, error) {
// GetSector returns an instance of Sector in a fixed disk, parameter block describes the block containing the
// sector, the parameter sectorIndex identifies the sector in the block. This function return error if the sector
// cannot be created due to any read error or if the requested sector index is invalid.
//
func (f *FixedDiskBlockFactory) GetSector(block *Block, sectorIndex uint32) (*Sector, error) {
blockIndex := block.BlockIndex
if block.IsEmpty {
@@ -124,7 +116,6 @@ func (f *FixedDiskBlockFactory) GetSector(block *Block, sectorIndex uint32) (*Se
// getExtraBlockLogicalRange returns the IndexRange representing the additional block if any. Additional block
// is the last block whose size < FixedDiskBlockFactory.BlockSize
//
func (f *FixedDiskBlockFactory) getExtraBlockLogicalRange() *common.IndexRange {
if f.extraBlockIndex == nil {
log.Panicf("Unexpected state, extraBlockIndex not set")

View File

@@ -3,14 +3,13 @@ package block
import (
"io"
"github.com/Microsoft/azure-vhd-utils/vhdcore/footer"
"github.com/Microsoft/azure-vhd-utils/vhdcore/reader"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/footer"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/reader"
)
// FixedDiskBlockReader type satisfies BlockDataReader interface,
// implementation of BlockDataReader::Read by this type can read the data from a block
// of a fixed disk.
//
type FixedDiskBlockReader struct {
vhdReader *reader.VhdReader
blockSizeInBytes uint32
@@ -20,7 +19,6 @@ type FixedDiskBlockReader struct {
// a fixed disk block.
// The parameter vhdReader is the reader to read the disk
// The parameter blockSizeInBytes is the size of the fixed disk block
//
func NewFixedDiskBlockReader(vhdReader *reader.VhdReader, blockSizeInBytes uint32) *FixedDiskBlockReader {
return &FixedDiskBlockReader{
vhdReader: vhdReader,
@@ -30,7 +28,6 @@ func NewFixedDiskBlockReader(vhdReader *reader.VhdReader, blockSizeInBytes uint3
// Read reads the data in a block of a fixed disk
// The parameter block represents the block to read
//
func (r *FixedDiskBlockReader) Read(block *Block) ([]byte, error) {
blockIndex := block.BlockIndex
blockByteOffset := int64(blockIndex) * int64(r.blockSizeInBytes)

View File

@@ -3,12 +3,11 @@ package block
import (
"fmt"
"github.com/Microsoft/azure-vhd-utils/vhdcore"
"github.com/Microsoft/azure-vhd-utils/vhdcore/reader"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/reader"
)
// SectorFactory type is used to create Sector instance by reading 512 byte sector from block's 'data section'.
//
type SectorFactory struct {
vhdReader *reader.VhdReader
blockHasData func(uint32) bool
@@ -21,7 +20,6 @@ type SectorFactory struct {
// vhdReader is the reader to be used to read the sector, blockHasData is a function which can be used to
// check a block is empty by providing block identifier, getBlockAddress is a function which can be used
// to fetch the absolute byte offset of a block by providing block identifier.
//
func NewSectorFactory(vhdReader *reader.VhdReader, blockHasData func(uint32) bool, getBlockAddress func(uint32) int64) *SectorFactory {
return &SectorFactory{
vhdReader: vhdReader,
@@ -33,7 +31,6 @@ func NewSectorFactory(vhdReader *reader.VhdReader, blockHasData func(uint32) boo
// Create creates an instance of Sector by reading a 512 byte sector from the 'data section' of a block.
// block describes the block containing the sector, sectorIndex identifies the sector to read.
// This function return error if requested sector is invalid or in case of any read error.
//
func (f *SectorFactory) Create(block *Block, sectorIndex uint32) (*Sector, error) {
if int64(sectorIndex) > block.GetSectorCount() {
return nil, fmt.Errorf("Total sectors: %d, Requested Sectors: %d", block.GetSectorCount(), sectorIndex)
@@ -60,7 +57,6 @@ func (f *SectorFactory) Create(block *Block, sectorIndex uint32) (*Sector, error
// CreateEmptySector creates an instance of Sector representing empty sector. The Data property of this sector
// will be a slice of 512 bytes filled with zeros.
//
func (f *SectorFactory) CreateEmptySector(blockIndex, sectorIndex uint32) *Sector {
if f.emptySectorBuf == nil {
f.emptySectorBuf = make([]byte, vhdcore.VhdSectorLength)

View File

@@ -4,17 +4,16 @@ import (
"errors"
"io"
"github.com/Microsoft/azure-vhd-utils/vhdcore"
"github.com/Microsoft/azure-vhd-utils/vhdcore/block"
"github.com/Microsoft/azure-vhd-utils/vhdcore/common"
"github.com/Microsoft/azure-vhd-utils/vhdcore/footer"
"github.com/Microsoft/azure-vhd-utils/vhdcore/vhdfile"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/block"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/common"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/footer"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/vhdfile"
)
// DiskStream provides a logical stream over a VHD file.
// The type exposes the VHD as a fixed VHD, regardless of actual underlying VHD type (dynamic, differencing
// or fixed type)
//
type DiskStream struct {
offset int64
size int64
@@ -27,7 +26,6 @@ type DiskStream struct {
}
// StreamExtent describes a block range of a disk which contains data.
//
type StreamExtent struct {
Range *common.IndexRange
OwnerVhdUniqueID *common.UUID
@@ -35,7 +33,6 @@ type StreamExtent struct {
// CreateNewDiskStream creates a new DiskStream.
// Parameter vhdPath is the path to VHD
//
func CreateNewDiskStream(vhdPath string) (*DiskStream, error) {
var err error
stream := &DiskStream{offset: 0, isClosed: false}
@@ -56,13 +53,11 @@ func CreateNewDiskStream(vhdPath string) (*DiskStream, error) {
// GetDiskType returns the type of the disk, expected values are DiskTypeFixed, DiskTypeDynamic
// or DiskTypeDifferencing
//
func (s *DiskStream) GetDiskType() footer.DiskType {
return s.vhdFile.GetDiskType()
}
// GetSize returns the length of the stream in bytes.
//
func (s *DiskStream) GetSize() int64 {
return s.size
}
@@ -78,7 +73,6 @@ func (s *DiskStream) GetSize() int64 {
// end of footer section after reading some but not all the bytes then Read won't return any error.
//
// Read satisfies io.Reader interface
//
func (s *DiskStream) Read(p []byte) (n int, err error) {
if s.offset >= s.size {
return 0, io.EOF
@@ -110,7 +104,6 @@ func (s *DiskStream) Read(p []byte) (n int, err error) {
// means relative to the end. It returns the new offset and an error, if any.
//
// Seek satisfies io.Seeker interface
//
func (s *DiskStream) Seek(offset int64, whence int) (int64, error) {
switch whence {
default:
@@ -134,7 +127,6 @@ func (s *DiskStream) Seek(offset int64, whence int) (int64, error) {
// Close closes the VHD file, rendering it unusable for I/O. It returns an error, if any.
//
// Close satisfies io.Closer interface
//
func (s *DiskStream) Close() error {
if !s.isClosed {
s.vhdFactory.Dispose(nil)
@@ -150,7 +142,6 @@ func (s *DiskStream) Close() error {
// so returned extents slice will not contain such range.
// For fixed disk - this method returns extents describing ranges of all blocks, to rule out fixed disk block
// ranges containing zero bytes use DetectEmptyRanges function in upload package.
//
func (s *DiskStream) GetExtents() ([]*StreamExtent, error) {
extents := make([]*StreamExtent, 1)
blocksCount := s.vhdBlockFactory.GetBlockCount()
@@ -181,7 +172,6 @@ func (s *DiskStream) GetExtents() ([]*StreamExtent, error) {
// so returned extents slice will not contain such range.
// For fixed disk - this method returns extents describing ranges of all blocks, to rule out fixed disk block
// ranges containing zero bytes use DetectEmptyRanges function in upload package.
//
func (s *DiskStream) EnumerateExtents(f func(*StreamExtent, error) bool) {
blocksCount := s.vhdBlockFactory.GetBlockCount()
i := int64(0)
@@ -214,7 +204,6 @@ func (s *DiskStream) EnumerateExtents(f func(*StreamExtent, error) bool) {
// readFromBlocks identifies the blocks constituting the range rangeToRead, and read data from these
// blocks into p. It returns the number of bytes read, which will be the minimum of sum of lengths
// of all constituting range and len(p), provided there is no error.
//
func (s *DiskStream) readFromBlocks(rangeToRead *common.IndexRange, p []byte) (n int, err error) {
rangeToReadFromBlocks := s.vhdDataRange.Intersection(rangeToRead)
if rangeToReadFromBlocks == nil {
@@ -248,7 +237,6 @@ func (s *DiskStream) readFromBlocks(rangeToRead *common.IndexRange, p []byte) (n
// readFromFooter reads the range rangeToRead from footer into p. It returns the number of bytes read, which
// will be minimum of the given range length and len(p), provided there is no error.
//
func (s *DiskStream) readFromFooter(rangeToRead *common.IndexRange, p []byte) (n int, err error) {
rangeToReadFromFooter := s.vhdFooterRange.Intersection(rangeToRead)
if rangeToReadFromFooter == nil {
@@ -277,7 +265,6 @@ func (s *DiskStream) readFromFooter(rangeToRead *common.IndexRange, p []byte) (n
}
// byteToBlock returns the block index corresponding to the given byte position.
//
func (s *DiskStream) byteToBlock(position int64) int64 {
sectorsPerBlock := s.vhdBlockFactory.GetBlockSize() / vhdcore.VhdSectorLength
return position / vhdcore.VhdSectorLength / sectorsPerBlock

View File

@@ -3,11 +3,10 @@ package footer
import (
"fmt"
"github.com/Microsoft/azure-vhd-utils/vhdcore"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore"
)
// DiskGeometry represents the cylinder, heads and sectors (CHS) per track.
//
type DiskGeometry struct {
// Offset = 0, Size = 2
// Stored in big-endian format
@@ -21,7 +20,6 @@ type DiskGeometry struct {
// CreateNewDiskGeometry creates a new DiskGeometry from the given virtual
// size. CHS field values are calculated based on the total data sectors
// present in the disk image.
//
func CreateNewDiskGeometry(virtualSize int64) *DiskGeometry {
// Total data sectors present in the disk image
var totalSectors = virtualSize / vhdcore.VhdSectorLength
@@ -77,7 +75,6 @@ func CreateNewDiskGeometry(virtualSize int64) *DiskGeometry {
}
// CreateCopy creates a copy of this instance
//
func (d *DiskGeometry) CreateCopy() *DiskGeometry {
return &DiskGeometry{
Cylinder: d.Cylinder,
@@ -88,7 +85,6 @@ func (d *DiskGeometry) CreateCopy() *DiskGeometry {
// Equals returns true if this and other points to the same instance
// or if CHS fields of pointed instances are same
//
func (d *DiskGeometry) Equals(other *DiskGeometry) bool {
if other == nil {
return false
@@ -98,7 +94,6 @@ func (d *DiskGeometry) Equals(other *DiskGeometry) bool {
}
// String returns the string representation of this range, this satisfies stringer interface.
//
func (d *DiskGeometry) String() string {
return fmt.Sprintf("Cylinder:%d Heads:%d Sectors:%d", d.Cylinder, d.Heads, d.Sectors)
}

View File

@@ -4,13 +4,12 @@ import (
"fmt"
"time"
"github.com/Microsoft/azure-vhd-utils/vhdcore"
"github.com/Microsoft/azure-vhd-utils/vhdcore/common"
"github.com/Microsoft/azure-vhd-utils/vhdcore/reader"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/common"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/reader"
)
// Factory type is used to create Footer instance by reading vhd footer section.
//
type Factory struct {
vhdReader *reader.VhdReader
footerOffset int64
@@ -18,14 +17,12 @@ type Factory struct {
// NewFactory creates a new instance of Factory, which can be used to create a Footer
// instance by reading the footer section using VhdReader.
//
func NewFactory(vhdReader *reader.VhdReader) *Factory {
return &Factory{vhdReader: vhdReader, footerOffset: vhdReader.Size - vhdcore.VhdFooterSize}
}
// Create creates a Footer instance by reading the footer section of the disk.
// This function return error if any error occurs while reading or parsing the footer fields.
//
func (f *Factory) Create() (*Footer, error) {
footer := &Footer{}
var err error
@@ -124,7 +121,6 @@ func (f *Factory) Create() (*Footer, error) {
// This function returns error if the cookie is invalid, if no or fewer bytes could be
// read. Cookie is stored as eight-character ASCII string starting at offset 0 relative
// to the beginning of footer.
//
func (f *Factory) readVhdCookie() (*vhdcore.Cookie, error) {
cookieData := make([]byte, 8)
if _, err := f.vhdReader.ReadBytes(f.footerOffset+0, cookieData); err != nil {
@@ -142,7 +138,6 @@ func (f *Factory) readVhdCookie() (*vhdcore.Cookie, error) {
// fewer bytes could be read.
// Feature is stored as 4 bytes value starting at offset 8 relative to the beginning of
// footer.
//
func (f *Factory) readFeatures() (VhdFeature, error) {
value, err := f.vhdReader.ReadUInt32(f.footerOffset + 8)
if err != nil {
@@ -155,7 +150,6 @@ func (f *Factory) readFeatures() (VhdFeature, error) {
// This function is return error if no or fewer bytes could be read.
// VhdFileFormatVersion is stored as 4 bytes value starting at offset 12 relative to the
// beginning of footer.
//
func (f *Factory) readFileFormatVersion() (VhdFileFormatVersion, error) {
value, err := f.vhdReader.ReadUInt32(f.footerOffset + 12)
if err != nil {
@@ -168,7 +162,6 @@ func (f *Factory) readFileFormatVersion() (VhdFileFormatVersion, error) {
// This function return error if no or fewer bytes could be read.
// Header offset is stored as 8 bytes value starting at offset 16 relative to the beginning
// of footer. This value is stored in big-endian format.
//
func (f *Factory) readHeaderOffset() (int64, error) {
value, err := f.vhdReader.ReadInt64(f.footerOffset + 16)
if err != nil {
@@ -182,7 +175,6 @@ func (f *Factory) readHeaderOffset() (int64, error) {
// This function return error if no or fewer bytes could be read.
// TimeStamp is stored as 4 bytes value starting at offset 24 relative to the beginning
// of footer. This value is stored in big-endian format.
//
func (f *Factory) readTimeStamp() (*time.Time, error) {
value, err := f.vhdReader.ReadDateTime(f.footerOffset + 24)
if err != nil {
@@ -196,7 +188,6 @@ func (f *Factory) readTimeStamp() (*time.Time, error) {
// character set. This function return error if no or fewer bytes could be read.
// Identifier is stored as 4 bytes value starting at offset 28 relative to the beginning
// of footer.
//
func (f *Factory) readCreatorApplication() (string, error) {
creatorApp := make([]byte, 4)
_, err := f.vhdReader.ReadBytes(f.footerOffset+28, creatorApp)
@@ -211,7 +202,6 @@ func (f *Factory) readCreatorApplication() (string, error) {
// bytes could be read.
// Version is stored as 4 bytes value starting at offset 32 relative to the beginning
// of footer.
//
func (f *Factory) readCreatorVersion() (VhdCreatorVersion, error) {
value, err := f.vhdReader.ReadUInt32(f.footerOffset + 32)
if err != nil {
@@ -225,7 +215,6 @@ func (f *Factory) readCreatorVersion() (VhdCreatorVersion, error) {
// bytes could be read.
// Version is stored as 4 bytes value starting at offset 36 relative to the beginning
// of footer.
//
func (f *Factory) readCreatorHostOsType() (HostOsType, error) {
value, err := f.vhdReader.ReadUInt32(f.footerOffset + 36)
if err != nil {
@@ -241,7 +230,6 @@ func (f *Factory) readCreatorHostOsType() (HostOsType, error) {
// beginning of footer. This size does not include the size consumed by vhd metadata such as
// header, footer BAT, block's bitmap
// This value is stored in big-endian format.
//
func (f *Factory) readPhysicalSize() (int64, error) {
value, err := f.vhdReader.ReadInt64(f.footerOffset + 40)
if err != nil {
@@ -258,7 +246,6 @@ func (f *Factory) readPhysicalSize() (int64, error) {
// beginning of footer. This size does not include the size consumed by vhd metadata such as
// header, footer BAT, block's bitmap
// This value is stored in big-endian format.
//
func (f *Factory) readVirtualSize() (int64, error) {
value, err := f.vhdReader.ReadInt64(f.footerOffset + 48)
if err != nil {
@@ -271,7 +258,6 @@ func (f *Factory) readVirtualSize() (int64, error) {
// track value for the hard disk. This function return error if no or fewer bytes could
// be read. The value is stored starting starting at offset 56 relative to the beginning of
// footer. This value is stored in big-endian format.
//
func (f *Factory) readDiskGeometry() (*DiskGeometry, error) {
diskGeometry := &DiskGeometry{}
cylinder, err := f.vhdReader.ReadUInt16(f.footerOffset + 56 + 0)
@@ -296,7 +282,6 @@ func (f *Factory) readDiskGeometry() (*DiskGeometry, error) {
// This function return error if no or fewer bytes could be read.
// The value is stored as 4 byte value starting at offset 60 relative to the beginning
// of footer. This value is stored in big-endian format.
//
func (f *Factory) readDiskType() (DiskType, error) {
value, err := f.vhdReader.ReadUInt32(f.footerOffset + 60)
if err != nil {
@@ -309,7 +294,6 @@ func (f *Factory) readDiskType() (DiskType, error) {
// This function return error if no or fewer bytes could be read.
// The value is stored as 4 byte value starting at offset 64 relative to the beginning
// of footer. This value is stored in big-endian format.
//
func (f *Factory) readCheckSum() (uint32, error) {
value, err := f.vhdReader.ReadUInt32(f.footerOffset + 64)
if err != nil {
@@ -323,7 +307,6 @@ func (f *Factory) readCheckSum() (uint32, error) {
// This function return error if no or fewer bytes could be read.
// The value is stored as 16 byte value starting at offset 68 relative to the beginning
// of footer.
//
func (f *Factory) readUniqueID() (*common.UUID, error) {
value, err := f.vhdReader.ReadUUID(f.footerOffset + 68)
if err != nil {
@@ -336,7 +319,6 @@ func (f *Factory) readUniqueID() (*common.UUID, error) {
// This function return error if the byte could be read.
// The value is stored as 1 byte value starting at offset 84 relative to the beginning
// of footer.
//
func (f *Factory) readSavedState() (bool, error) {
value, err := f.vhdReader.ReadBoolean(f.footerOffset + 84)
if err != nil {
@@ -349,7 +331,6 @@ func (f *Factory) readSavedState() (bool, error) {
// This function return error if the byte could be read.
// It is 427 bytes in size starting at offset 85 relative to the beginning
// of footer.
//
func (f *Factory) readReserved() ([]byte, error) {
reserved := make([]byte, 427)
_, err := f.vhdReader.ReadBytes(f.footerOffset+85, reserved)
@@ -361,7 +342,6 @@ func (f *Factory) readReserved() ([]byte, error) {
// readWholeFooter reads the entire footer as a raw bytes. This function return
// error if the byte could be read.
//
func (f *Factory) readWholeFooter() ([]byte, error) {
rawData := make([]byte, 512)
_, err := f.vhdReader.ReadBytes(f.footerOffset+0, rawData)

View File

@@ -4,14 +4,13 @@ import (
"bytes"
"time"
"github.com/Microsoft/azure-vhd-utils/vhdcore"
"github.com/Microsoft/azure-vhd-utils/vhdcore/common"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/common"
)
// Footer represents the footer of the vhd, the size of the footer is 512 bytes.
// The last 512 bytes of the disk is footer. In case of dynamic and differential
// vhds, the footer is replicated at the beginning of the disk as well.
//
type Footer struct {
// Offset = 0, Size = 8
Cookie *vhdcore.Cookie
@@ -53,7 +52,6 @@ type Footer struct {
}
// CreateCopy creates and returns a deep copy of this instance.
//
func (v *Footer) CreateCopy() *Footer {
return &Footer{
Cookie: v.Cookie.CreateCopy(),
@@ -78,7 +76,6 @@ func (v *Footer) CreateCopy() *Footer {
// Equal returns true if this and other points to the same instance or if contents
// of the fields of these two instances are same.
//
func (v *Footer) Equal(other *Footer) bool {
if other == nil {
return false

View File

@@ -1,12 +1,11 @@
package footer
import (
"github.com/Microsoft/azure-vhd-utils/vhdcore"
"github.com/Microsoft/azure-vhd-utils/vhdcore/writer"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/writer"
)
// SerializeFooter returns the given VhdFooter instance as byte slice of length 512 bytes.
//
func SerializeFooter(footer *Footer) []byte {
buffer := make([]byte, vhdcore.VhdFooterSize)
writer := writer.NewVhdWriterFromByteSlice(buffer)

View File

@@ -5,14 +5,13 @@ import (
"strings"
"time"
"github.com/Microsoft/azure-vhd-utils/vhdcore"
"github.com/Microsoft/azure-vhd-utils/vhdcore/common"
"github.com/Microsoft/azure-vhd-utils/vhdcore/header/parentlocator"
"github.com/Microsoft/azure-vhd-utils/vhdcore/reader"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/common"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/header/parentlocator"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/reader"
)
// Factory type is used to create VhdHeader instance by reading vhd header section.
//
type Factory struct {
vhdReader *reader.VhdReader
headerOffset int64
@@ -20,14 +19,12 @@ type Factory struct {
// NewFactory creates a new instance of Factory, which can be used to create
// a VhdHeader instance by reading the header section using vhdReader.
//
func NewFactory(vhdReader *reader.VhdReader, headerOffset int64) *Factory {
return &Factory{vhdReader: vhdReader, headerOffset: headerOffset}
}
// Create creates a Header instance by reading the header section of a expandable disk.
// This function return error if any error occurs while reading or parsing the header fields.
//
func (f *Factory) Create() (*Header, error) {
header := &Header{}
var err error
@@ -107,7 +104,6 @@ func (f *Factory) Create() (*Header, error) {
// This function return error if the cookie is invalid, if no or fewer bytes could be read.
// Cookie is stored as eight-character ASCII string starting at offset 0 relative to the beginning
// of header.
//
func (f *Factory) readHeaderCookie() (*vhdcore.Cookie, error) {
cookieData := make([]byte, 8)
if _, err := f.vhdReader.ReadBytes(f.headerOffset+0, cookieData); err != nil {
@@ -126,7 +122,6 @@ func (f *Factory) readHeaderCookie() (*vhdcore.Cookie, error) {
// bytes could be read.
// This value is stored as 8 bytes value starting at offset 8 relative to the beginning of header.
// This value is stored in big-endian format.
//
func (f *Factory) readDataOffset() (int64, error) {
value, err := f.vhdReader.ReadInt64(f.headerOffset + 8)
if err != nil {
@@ -139,7 +134,6 @@ func (f *Factory) readDataOffset() (int64, error) {
// disk. This function return error if no or fewer bytes could be read.
// BATOffset is stored as 8 bytes value starting at offset 16 relative to the beginning of header.
// This value is stored in big-endian format.
//
func (f *Factory) readBATOffset() (int64, error) {
value, err := f.vhdReader.ReadInt64(f.headerOffset + 16)
if err != nil {
@@ -151,7 +145,6 @@ func (f *Factory) readBATOffset() (int64, error) {
// readHeaderVersion reads the value of the field the holds the major/minor version of the disk header.
// This function return error if no or fewer bytes could be read. HeaderVersion is stored as 4 bytes
// value starting at offset 24 relative to the beginning of header.
//
func (f *Factory) readHeaderVersion() (VhdHeaderVersion, error) {
value, err := f.vhdReader.ReadUInt32(f.headerOffset + 24)
if err != nil {
@@ -169,7 +162,6 @@ func (f *Factory) readHeaderVersion() (VhdHeaderVersion, error) {
// error if no or fewer bytes could be read.
// MaxTableEntries is stored as 4 bytes value starting at offset 28 relative to the beginning of
// header. This value is stored in big-endian format.
//
func (f *Factory) readMaxBATEntries() (uint32, error) {
value, err := f.vhdReader.ReadUInt32(f.headerOffset + 28)
if err != nil {
@@ -182,7 +174,6 @@ func (f *Factory) readMaxBATEntries() (uint32, error) {
// bitmap section'. This function return error if no or fewer bytes could be read.
// BlockSize is stored as 4 bytes value starting at offset 32 relative to the beginning of header.
// This value is stored in big-endian format.
//
func (f *Factory) readBlockSize() (uint32, error) {
value, err := f.vhdReader.ReadUInt32(f.headerOffset + 32)
if err != nil {
@@ -195,7 +186,6 @@ func (f *Factory) readBlockSize() (uint32, error) {
// This function return error if no or fewer bytes could be read.
// The value is stored as 4 byte value starting at offset 36 relative to the beginning of header.
// This value is stored in big-endian format.
//
func (f *Factory) readCheckSum() (uint32, error) {
value, err := f.vhdReader.ReadUInt32(f.headerOffset + 36)
if err != nil {
@@ -208,7 +198,6 @@ func (f *Factory) readCheckSum() (uint32, error) {
// field is used only for differencing disk. This is a 128-bit universally unique identifier (UUID).
// This function return error if no or fewer bytes could be read.
// The value is stored as 16 byte value starting at offset 40 relative to the beginning of header.
//
func (f *Factory) readParentUniqueID() (*common.UUID, error) {
value, err := f.vhdReader.ReadUUID(f.headerOffset + 40)
if err != nil {
@@ -222,7 +211,6 @@ func (f *Factory) readParentUniqueID() (*common.UUID, error) {
// instance of time.Time. This function return error if no or fewer bytes could be read.
// TimeStamp is stored as 4 bytes value starting at offset 56 relative to the beginning of header.
// This value is stored in big-endian format.
//
func (f *Factory) readParentTimeStamp() (*time.Time, error) {
value, err := f.vhdReader.ReadDateTime(f.headerOffset + 56)
if err != nil {
@@ -234,7 +222,6 @@ func (f *Factory) readParentTimeStamp() (*time.Time, error) {
// readReserved reads the reserved field which is not used and all set to zero. This function return
// error if no or fewer bytes could be read. Reserved is stored as 4 bytes value starting at offset
// 60 relative to the beginning of header. This value is stored in big-endian format.
//
func (f *Factory) readReserved() (uint32, error) {
value, err := f.vhdReader.ReadUInt32(f.headerOffset + 60)
if err != nil {
@@ -246,7 +233,6 @@ func (f *Factory) readReserved() (uint32, error) {
// readParentPath reads the field storing parent hard disk file name. This function return error if
// no or fewer bytes could be read. ParentPath is stored in UTF-16 as big-endian format, its length is
// 512 bytes, starting at offset 64 relative to the beginning of header.
//
func (f *Factory) readParentPath() (string, error) {
parentPath := make([]byte, 512)
_, err := f.vhdReader.ReadBytes(f.headerOffset+64, parentPath)
@@ -259,7 +245,6 @@ func (f *Factory) readParentPath() (string, error) {
// readParentLocators reads the collection of parent locator entries. This function return error if
// no or fewer bytes could be read. There are 8 entries, each 24 bytes, starting at offset 576 relative
// to the beginning of header.
//
func (f *Factory) readParentLocators() (parentlocator.ParentLocators, error) {
var err error
count := 8
@@ -279,7 +264,6 @@ func (f *Factory) readParentLocators() (parentlocator.ParentLocators, error) {
// readWholeHeader reads the entire header as a raw bytes. This function return error if the byte
// could be read.
//
func (f *Factory) readWholeHeader() ([]byte, error) {
rawData := make([]byte, 1024)
_, err := f.vhdReader.ReadBytes(f.headerOffset+0, rawData)

View File

@@ -3,9 +3,9 @@ package header
import (
"time"
"github.com/Microsoft/azure-vhd-utils/vhdcore"
"github.com/Microsoft/azure-vhd-utils/vhdcore/common"
"github.com/Microsoft/azure-vhd-utils/vhdcore/header/parentlocator"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/common"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/header/parentlocator"
)
// Header represents the header of the vhd, size of the header is 1024 bytes.
@@ -14,7 +14,6 @@ import (
// replicated at the beginning of the disk as well, the header structure follows
// this replicated footer, the field 'HeaderOffset' in the footer contains absolute
// offset to the header structure.
//
type Header struct {
// Offset = 0, Size = 8
Cookie *vhdcore.Cookie

View File

@@ -2,12 +2,12 @@ package parentlocator
import (
"fmt"
"github.com/Microsoft/azure-vhd-utils/vhdcore/reader"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/reader"
)
// Factory type is used to create ParentLocator instance by reading one entry
// in vhd header's parent-hard-disk-locator-info collection section.
//
type Factory struct {
vhdReader *reader.VhdReader
locatorOffset int64
@@ -16,7 +16,6 @@ type Factory struct {
// NewFactory creates a new instance of Factory, which can be used to create ParentLocator instance
// by reading one entry from the vhd header's parent-hard-disk-locator-info collection,
// locatorOffset is the offset of the entry to read, vhdReader is the reader to be used to read the entry.
//
func NewFactory(vhdReader *reader.VhdReader, locatorOffset int64) *Factory {
return &Factory{vhdReader: vhdReader, locatorOffset: locatorOffset}
}
@@ -24,7 +23,6 @@ func NewFactory(vhdReader *reader.VhdReader, locatorOffset int64) *Factory {
// Create creates a ParentLocator instance by reading one entry in vhd header's parent-hard-disk-locator-info
// collection section of the disk. This function return error if any error occurs while reading or parsing
// the parent locators table fields.
//
func (f *Factory) Create() (*ParentLocator, error) {
locator := &ParentLocator{}
var err error
@@ -73,7 +71,6 @@ func (f *Factory) Create() (*ParentLocator, error) {
// This function return error if no or fewer bytes could be read. The value is stored as 4 byte
// value starting at offset 0 relative to the beginning of this parent-hard-disk-locator. This value
// is stored in big-endian format.
//
func (f *Factory) readPlatformCode() (PlatformCode, error) {
value, err := f.vhdReader.ReadInt32(f.locatorOffset + 0)
if err != nil {
@@ -86,7 +83,6 @@ func (f *Factory) readPlatformCode() (PlatformCode, error) {
// the parent hard disk file locator. This function return error if no or fewer bytes could be read.
// The value is stored as 4 byte value starting at offset 4 relative to the beginning parent-hard-disk-locator-info.
// This value is stored in big-endian format.
//
func (f *Factory) readPlatformDataSpace() (int32, error) {
value, err := f.vhdReader.ReadInt32(f.locatorOffset + 4)
if err != nil {
@@ -99,7 +95,6 @@ func (f *Factory) readPlatformDataSpace() (int32, error) {
// locator in bytes. This function return error if no or fewer bytes could be read. The value is stored
// as 4 byte value starting at offset 8 relative to the beginning parent-hard-disk-locator-info. This value
// is stored in big-endian format.
//
func (f *Factory) readPlatformDataLength() (int32, error) {
value, err := f.vhdReader.ReadInt32(f.locatorOffset + 8)
if err != nil {
@@ -112,7 +107,6 @@ func (f *Factory) readPlatformDataLength() (int32, error) {
// This function return error if no or fewer bytes could be read. The value is stored as 4 byte
// value starting at offset 12 relative to the beginning parent-hard-disk-locator-info.
// This value is stored in big-endian format.
//
func (f *Factory) readReserved() (int32, error) {
value, err := f.vhdReader.ReadInt32(f.locatorOffset + 12)
if err != nil {
@@ -125,7 +119,6 @@ func (f *Factory) readReserved() (int32, error) {
// specific file locator data is stored. Call to this function is panic if no or fewer bytes could be read.
// The value is stored as 4 byte value starting at offset 16 relative to the beginning parent-hard-disk-locator-info.
// This value is stored in big-endian format.
//
func (f *Factory) readPlatformDataOffset() (int64, error) {
value, err := f.vhdReader.ReadInt64(f.locatorOffset + 16)
if err != nil {

View File

@@ -1,15 +1,15 @@
package parentlocator
import (
"github.com/Microsoft/azure-vhd-utils/vhdcore/common"
"log"
"strings"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/common"
)
// ParentLocator represents an entry in Parent locator table. Each entry represents
// details (parent-hard-disk-locator-info) of file locator which is used to locate
// the parent disk file of differencing hard disk.
//
type ParentLocator struct {
// Offset = 0, Size = 4
// This field stores the code representing the platform-specific format used for
@@ -43,7 +43,6 @@ type ParentLocator struct {
// SetPlatformSpecificFileLocator retrieves the file locator value and store that in the property
// PlatformSpecificFileLocator
//
func (l *ParentLocator) SetPlatformSpecificFileLocator(fileLocator []byte) {
// 1. For the platform codes - W2Ru and W2Ku, fileLocator contents is UTF-16 encoded.
// 2. For the platform code - MacX, fileLocator contents is UTF-8 encoded.

View File

@@ -3,16 +3,15 @@ package reader
import (
"encoding/binary"
"fmt"
"github.com/Microsoft/azure-vhd-utils/vhdcore/common"
"io"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/common"
)
// bufferSizeInBytes is the size of the buffer used by BinaryReader
//
const bufferSizeInBytes = 16
// ReadAtReader interface that composes io.ReaderAt and io.Reader interfaces.
//
type ReadAtReader interface {
io.ReaderAt
io.Reader
@@ -20,7 +19,6 @@ type ReadAtReader interface {
// BinaryReader is the reader which can be used to read values of primitive types from a reader
// The reader supports reading data stored both in little-endian or big-endian format.
//
type BinaryReader struct {
buffer []byte
order binary.ByteOrder
@@ -31,7 +29,6 @@ type BinaryReader struct {
// NewBinaryReader creates a new instance of BinaryReader, from is the underlying data source
// to read from, order is the byte order used to encode the data in the source, size is the
// length of the data source in bytes.
//
func NewBinaryReader(from ReadAtReader, order binary.ByteOrder, size int64) *BinaryReader {
return &BinaryReader{
buffer: make([]byte, bufferSizeInBytes),
@@ -45,13 +42,11 @@ func NewBinaryReader(from ReadAtReader, order binary.ByteOrder, size int64) *Bin
// copied and an error if fewer bytes were read. The error is EOF only if no bytes were
// read. If an EOF happens after reading some but not all the bytes, ReadBytes returns
// ErrUnexpectedEOF. On return, n == len(buf) if and only if err == nil.
//
func (b *BinaryReader) ReadBytes(offset int64, buf []byte) (int, error) {
return b.from.ReadAt(buf, offset)
}
// ReadByte reads a byte from underlying source starting at byte offset off and returns it.
//
func (b *BinaryReader) ReadByte(offset int64) (byte, error) {
if _, err := b.readToBuffer(1, offset); err != nil {
return 0, err
@@ -62,7 +57,6 @@ func (b *BinaryReader) ReadByte(offset int64) (byte, error) {
// ReadBoolean reads a byte from underlying source starting at byte offset off and
// returns it as a bool.
//
func (b *BinaryReader) ReadBoolean(offset int64) (bool, error) {
if _, err := b.readToBuffer(1, offset); err != nil {
return false, err
@@ -72,7 +66,6 @@ func (b *BinaryReader) ReadBoolean(offset int64) (bool, error) {
// ReadUInt16 reads an encoded unsigned 2 byte integer from underlying source starting
// at byte offset off and return it as a uint16.
//
func (b *BinaryReader) ReadUInt16(offset int64) (uint16, error) {
if _, err := b.readToBuffer(2, offset); err != nil {
return 0, err
@@ -82,7 +75,6 @@ func (b *BinaryReader) ReadUInt16(offset int64) (uint16, error) {
// ReadInt16 reads an encoded signed 2 byte integer from underlying source starting
// at byte offset off returns it as a int16.
//
func (b *BinaryReader) ReadInt16(off int64) (int16, error) {
if _, err := b.readToBuffer(2, off); err != nil {
return 0, err
@@ -92,7 +84,6 @@ func (b *BinaryReader) ReadInt16(off int64) (int16, error) {
// ReadUInt32 reads an encoded unsigned 4 byte integer from underlying source starting
// at byte offset off returns it as a uint32.
//
func (b *BinaryReader) ReadUInt32(off int64) (uint32, error) {
if _, err := b.readToBuffer(4, off); err != nil {
return 0, err
@@ -102,7 +93,6 @@ func (b *BinaryReader) ReadUInt32(off int64) (uint32, error) {
// ReadInt32 reads an encoded signed 4 byte integer from underlying source starting
// at byte offset off and returns it as a int32.
//
func (b *BinaryReader) ReadInt32(off int64) (int32, error) {
if _, err := b.readToBuffer(4, off); err != nil {
return 0, err
@@ -112,7 +102,6 @@ func (b *BinaryReader) ReadInt32(off int64) (int32, error) {
// ReadUInt64 reads an encoded unsigned 8 byte integer from underlying source starting
// at byte offset off and returns it as a uint64.
//
func (b *BinaryReader) ReadUInt64(off int64) (uint64, error) {
if _, err := b.readToBuffer(8, off); err != nil {
return 0, err
@@ -122,7 +111,6 @@ func (b *BinaryReader) ReadUInt64(off int64) (uint64, error) {
// ReadInt64 reads an encoded signed 4 byte integer from underlying source starting
// at byte offset off and and returns it as a int64.
//
func (b *BinaryReader) ReadInt64(off int64) (int64, error) {
if _, err := b.readToBuffer(8, off); err != nil {
return 0, err
@@ -132,7 +120,6 @@ func (b *BinaryReader) ReadInt64(off int64) (int64, error) {
// ReadUUID reads 16 byte character sequence from underlying source starting
// at byte offset off and returns it as a UUID.
//
func (b *BinaryReader) ReadUUID(off int64) (*common.UUID, error) {
if _, err := b.readToBuffer(16, off); err != nil {
return nil, err
@@ -146,7 +133,6 @@ func (b *BinaryReader) ReadUUID(off int64) (*common.UUID, error) {
// and the error, if any.
// ReadAt always returns a non-nil error when n < len(numBytes). At end of file, that
// error is io.EOF.
//
func (b *BinaryReader) readToBuffer(numBytes int, off int64) (int, error) {
if numBytes > bufferSizeInBytes {
return 0, fmt.Errorf("Expected (0-%d) however found: %d", bufferSizeInBytes, numBytes)

View File

@@ -6,19 +6,17 @@ import (
"time"
"unsafe"
"github.com/Microsoft/azure-vhd-utils/vhdcore"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore"
)
// VhdReader is the reader used by various components responsible for reading different
// segments of VHD such as header, footer, BAT, block, bitmap and sector.
//
type VhdReader struct {
*BinaryReader
}
// NewVhdReader creates new instance of the VhdReader, that reads from the underlying
// source, size is the size of the source in bytes.
//
func NewVhdReader(source ReadAtReader, size int64) *VhdReader {
var order binary.ByteOrder
if isLittleEndian() {
@@ -31,7 +29,6 @@ func NewVhdReader(source ReadAtReader, size int64) *VhdReader {
// NewVhdReaderFromByteSlice creates a new instance of VhdReader, that uses the given
// byte slice as the underlying source to read from.
//
func NewVhdReaderFromByteSlice(b []byte) *VhdReader {
source := bytes.NewReader(b)
return NewVhdReader(source, int64(len(b)))
@@ -39,7 +36,6 @@ func NewVhdReaderFromByteSlice(b []byte) *VhdReader {
// ReadDateTime reads an encoded vhd timestamp from underlying source starting at byte
// offset off and return it as a time.Time.
//
func (r *VhdReader) ReadDateTime(off int64) (*time.Time, error) {
d, err := r.ReadUInt32(off)
if err != nil {
@@ -51,7 +47,6 @@ func (r *VhdReader) ReadDateTime(off int64) (*time.Time, error) {
// isLittleEndian returns true if the host machine is little endian, false for
// big endian
//
func isLittleEndian() bool {
var i int32 = 0x01020304
u := unsafe.Pointer(&i)

View File

@@ -3,16 +3,14 @@ package validator
import (
"fmt"
"github.com/Microsoft/azure-vhd-utils/vhdcore/diskstream"
"github.com/Microsoft/azure-vhd-utils/vhdcore/vhdfile"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/diskstream"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/vhdfile"
)
// oneTB is one TeraByte
//
const oneTB int64 = 1024 * 1024 * 1024 * 1024
// ValidateVhd returns error if the vhdPath refer to invalid vhd.
//
func ValidateVhd(vhdPath string) error {
vFactory := &vhdfile.FileFactory{}
_, err := vFactory.Create(vhdPath)
@@ -24,7 +22,6 @@ func ValidateVhd(vhdPath string) error {
// ValidateVhdSize returns error if size of the vhd referenced by vhdPath is more than
// the maximum allowed size (1TB)
//
func ValidateVhdSize(vhdPath string) error {
stream, _ := diskstream.CreateNewDiskStream(vhdPath)
if stream.GetSize() > oneTB {

View File

@@ -3,15 +3,14 @@ package vhdfile
import (
"fmt"
"github.com/Microsoft/azure-vhd-utils/vhdcore/bat"
"github.com/Microsoft/azure-vhd-utils/vhdcore/block"
"github.com/Microsoft/azure-vhd-utils/vhdcore/footer"
"github.com/Microsoft/azure-vhd-utils/vhdcore/header"
"github.com/Microsoft/azure-vhd-utils/vhdcore/reader"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/bat"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/block"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/footer"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/header"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/reader"
)
// VhdFile represents a VHD.
//
type VhdFile struct {
// Footer represents the disk's footer.
Footer *footer.Footer
@@ -31,14 +30,12 @@ type VhdFile struct {
// GetDiskType returns the type of the disk. Possible values are DiskTypeFixed, DiskTypeDynamic
// and DiskTypeDifferencing.
//
func (f *VhdFile) GetDiskType() footer.DiskType {
return f.Footer.DiskType
}
// GetBlockFactory returns a BlockFactory instance that can be used to create Block instances
// that represents blocks in the disk.
//
func (f *VhdFile) GetBlockFactory() (block.Factory, error) {
params := &block.FactoryParams{
VhdHeader: f.Header,
@@ -82,7 +79,6 @@ func (f *VhdFile) GetBlockFactory() (block.Factory, error) {
// GetIdentityChain returns VHD identity chain, for differencing disk this will be a slice with
// unique ids of this and all it's ancestor disks. For fixed and dynamic disk, this will be a
// slice with one entry representing disk's unique id.
//
func (f *VhdFile) GetIdentityChain() []string {
ids := []string{f.Footer.UniqueID.String()}
for p := f.Parent; p != nil; p = p.Parent {

View File

@@ -4,14 +4,13 @@ import (
"os"
"path/filepath"
"github.com/Microsoft/azure-vhd-utils/vhdcore/bat"
"github.com/Microsoft/azure-vhd-utils/vhdcore/footer"
"github.com/Microsoft/azure-vhd-utils/vhdcore/header"
"github.com/Microsoft/azure-vhd-utils/vhdcore/reader"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/bat"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/footer"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/header"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore/reader"
)
// FileFactory is a type to create VhdFile representing VHD in the local machine
//
type FileFactory struct {
vhdDir string // Path to the directory holding VHD file
fd *os.File // File descriptor of the VHD file
@@ -20,7 +19,6 @@ type FileFactory struct {
}
// Create creates a new VhdFile representing a VHD in the local machine located at vhdPath
//
func (f *FileFactory) Create(vhdPath string) (*VhdFile, error) {
var err error
if f.fd, err = os.Open(vhdPath); err != nil {
@@ -41,7 +39,6 @@ func (f *FileFactory) Create(vhdPath string) (*VhdFile, error) {
// CreateFromReaderAtReader creates a new VhdFile from a reader.ReadAtReader, which is a reader associated
// with a VHD in the local machine. The parameter size is the size of the VHD in bytes
//
func (f *FileFactory) CreateFromReaderAtReader(r reader.ReadAtReader, size int64) (*VhdFile, error) {
vhdReader := reader.NewVhdReader(r, size)
vhdFooter, err := (footer.NewFactory(vhdReader)).Create()
@@ -95,7 +92,6 @@ func (f *FileFactory) CreateFromReaderAtReader(r reader.ReadAtReader, size int64
// Dispose disposes this instance of VhdFileFactory and VhdFileFactory instances of parent and child
// VHDs
//
func (f *FileFactory) Dispose(err error) {
if f.fd != nil {
f.fd.Close()
@@ -112,7 +108,6 @@ func (f *FileFactory) Dispose(err error) {
}
// Dispose disposes this instance of VhdFileFactory and VhdFileFactory instances of all ancestor VHDs
//
func (f *FileFactory) disposeUp(err error) {
if f.fd != nil {
f.fd.Close()
@@ -125,7 +120,6 @@ func (f *FileFactory) disposeUp(err error) {
}
// Dispose disposes this instance of VhdFileFactory and VhdFileFactory instances of all descendant VHDs
//
func (f *FileFactory) disposeDown(err error) {
if f.fd != nil {
f.fd.Close()

View File

@@ -7,19 +7,17 @@ import (
"time"
"unsafe"
"github.com/Microsoft/azure-vhd-utils/vhdcore"
"yunion.io/x/cloudmux/pkg/multicloud/azure/vhdcore"
)
// VhdWriter is the writer used by various components responsible for writing header and
// footer of the VHD.
//
type VhdWriter struct {
*BinaryWriter
}
// NewVhdWriter creates new instance of the VhdWriter, that writes to the underlying target,
// size is the size of the target in bytes.
//
func NewVhdWriter(target io.WriterAt, size int64) *VhdWriter {
var order binary.ByteOrder
if isLittleEndian() {
@@ -32,26 +30,22 @@ func NewVhdWriter(target io.WriterAt, size int64) *VhdWriter {
// NewVhdWriterFromByteSlice creates a new instance of VhdWriter, that uses the given byte
// slice as the underlying target to write to.
//
func NewVhdWriterFromByteSlice(b []byte) *VhdWriter {
return NewVhdWriter(ByteSliceWriteAt(b), int64(len(b)))
}
// WriteTimeStamp writes vhd timestamp represented by the given time to underlying source
// starting at byte offset off.
//
func (r *VhdWriter) WriteTimeStamp(off int64, time *time.Time) {
vhdTimeStamp := vhdcore.NewVhdTimeStamp(time)
r.WriteUInt32(off, vhdTimeStamp.TotalSeconds)
}
// ByteSliceWriteAt is a type that satisfies io.WriteAt interface for byte slice.
//
type ByteSliceWriteAt []byte
// WriteAt copies len(b) bytes to the byte slice starting at byte offset off. It returns the number
// of bytes copied and an error, if any. WriteAt returns a non-nil error when n != len(b).
//
func (s ByteSliceWriteAt) WriteAt(b []byte, off int64) (n int, err error) {
if off < 0 || off > int64(len(s)) {
err = fmt.Errorf("Index %d is out of the boundary %d", off, len(s)-1)
@@ -67,7 +61,6 @@ func (s ByteSliceWriteAt) WriteAt(b []byte, off int64) (n int, err error) {
}
// isLittleEndian returns true if the host machine is little endian, false for big endian.
//
func isLittleEndian() bool {
var i int32 = 0x01020304
u := unsafe.Pointer(&i)