I'm writing a converter from integer strings to integer values, which seems like it should be pretty easy since the character values are 0x30 ("0") through 0x39 ("9"). So just subtract 0x30 from each character and multiply by 10**exp based on its position in the string.
I just can't figure out how to get the byte-to-int conversions to work, particularly when I need to do the math part. It would be relatively easy if there were a conversion from bytes1 to int or uint, but I can't seem to find one.
Here's what I've got so far, but I get a compile error on the indicated line.
function st2num(string memory numberString) public pure returns (uint) {
    uint val = 0;
    bytes memory stringBytes = bytes(numberString);
    for (uint i = 0; i < stringBytes.length; i++) {
        uint exp = stringBytes.length - i;
        bytes1 ival = stringBytes[i];
        bytes1 jval = ival - 0x30;
        val += (jval * (10**exp)); // <-- doesn't compile
    }
    return val;
}
In case anybody stumbles across this -- this seems to work for strings representing positive integers. I don't think it would be too hard to extend to strings like "-23.1".
function st2num(string memory numString) public pure returns (uint) {
    uint val = 0;
    bytes memory stringBytes = bytes(numString);
    for (uint i = 0; i < stringBytes.length; i++) {
        uint exp = stringBytes.length - i;
        bytes1 ival = stringBytes[i];
        uint8 uval = uint8(ival);        // bytes1 -> uint8 is the conversion I was missing
        uint jval = uval - uint(0x30);   // ASCII digit -> numeric value
        val += jval * (10**(exp - 1));   // scale by position in the string
    }
    return val;
}
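For reference, the extension to a leading minus sign really is straightforward. Here is a sketch of that extension (the name st2int, the digit require, and ignoring the decimal-point case are my own choices, not part of the original answer):
function st2int(string memory numString) public pure returns (int) {
    bytes memory b = bytes(numString);
    bool negative = b.length > 0 && b[0] == "-";
    uint val = 0;
    for (uint i = negative ? 1 : 0; i < b.length; i++) {
        uint8 c = uint8(b[i]);
        require(c >= 0x30 && c <= 0x39, "Non-digit character");
        val = val * 10 + (c - 0x30); // accumulate left-to-right instead of using 10**exp
    }
    return negative ? -int(val) : int(val);
}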
I need to read each of the 256 bits stored in a bytes32 variable. I'm using Solidity 0.8.1.
Thank you for any help you can offer.
// SPDX-License-Identifier: GPL-3.0
pragma solidity >=0.7.0 <0.9.0;

contract Test {
    // each ASCII character takes 1 byte, so 4 x "abcdefgh" fills all 32 bytes
    bytes32 public test = "abcdefghabcdefghabcdefghabcdefgh";

    function accessByteByIndex(uint index) public view returns (bytes1) {
        // bytes32 holds exactly 32 bytes, so valid indexes are 0..31
        require(index < 32, "Index out of range");
        return test[index];
    }

    function reverseBytes() public pure returns (bytes memory) {
        uint num = 32;
        bytes memory chars = "abcdefghabcdefghabcdefghabcdefgh";
        bytes memory reverse = new bytes(num);
        for (uint i = 0; i < num; i++) {
            reverse[i] = chars[num - 1 - i];
        }
        return reverse;
    }
}
I needed to cast the bytes32 variable to a uint256, then use the following function:
function isBitSet(uint256 b, uint256 pos) public pure returns (bool) {
    return ((b >> pos) & 1) == 1;
}
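Putting the two steps together, a minimal sketch (the wrapper name is mine) looks like this; note that pos counts from the least-significant bit, i.e. from the right-hand end of the bytes32:
function isBitSetInBytes32(bytes32 data, uint256 pos) public pure returns (bool) {
    // cast bytes32 to uint256, then shift and mask as above
    return ((uint256(data) >> pos) & 1) == 1;
}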
I am trying to write a function to reverse the digits of a uint in a Solidity smart contract.
I saw this answer here that shows how to reverse a string in Solidity by looping through the bytes of a passed string from back to front, but since I am only concerned with integers, I wondered if there was a way to do it using only integer arithmetic that also handles overflow appropriately.
Thanks!
Searching around, I found this answer that gives a solution to this very problem in C. I was able to tweak it to work in Solidity.
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

contract Reverse {
    function reverse_recurse(uint i, uint r) internal pure returns (uint) {
        if (i != 0) {
            uint least_digit = i % 10;
            // bail out before r * 10 + least_digit can overflow
            if (r >= type(uint).max / 10 && (r > type(uint).max / 10 || least_digit > type(uint).max % 10)) {
                return 0; // overflow
            }
            r = reverse_recurse(i / 10, r * 10 + least_digit);
        }
        return r;
    }

    // Reverses the digits of a uint; returns 0 on overflow
    function reverse_int(uint i) public pure returns (uint) {
        return reverse_recurse(i, 0);
    }
}
decoded input { "uint256 i": "12345" }
decoded output { "0": "uint256: 54321" }
Unlike the C solution, this works only for unsigned ints, as that was all I needed.
The above answer seemed too complicated to me, so I decided to create something on my own:
function reverseNumber(uint n) public pure returns (uint) {
    uint num = n;
    uint reversedNum = 0;
    while (true) {
        // shift the accumulated digits left and append the next one;
        // under 0.8's checked arithmetic this reverts on overflow
        // rather than returning 0 like the recursive version above
        reversedNum = (reversedNum * 10) + (num % 10);
        if (num < 10) {
            break;
        }
        num = num / 10;
    }
    return reversedNum;
}
I am compiling code from an OpenSea project, written for Solidity 0.5.0, using the 0.8.0 compiler, and I'm getting this error:
ParserError: Expected primary expression.
--> contracts/Strings.sol:53:25:
|
53 | bstr[k--] = byte(uint8(48 + _i % 10));
| ^^^^
Error HH600: Compilation failed
The original code is found at https://github.com/ProjectOpenSea/opensea-creatures/blob/master/contracts/Strings.sol; it uses Solidity 0.5.0 and is presumably compiled with Truffle. I am attempting to use Hardhat and 0.8.0. The code is reproduced below:
pragma solidity ^0.8.0;
library Strings {
// via https://github.com/oraclize/ethereum-api/blob/master/oraclizeAPI_0.5.sol
function strConcat(string memory _a, string memory _b, string memory _c, string memory _d, string memory _e) internal pure returns (string memory) {
bytes memory _ba = bytes(_a);
bytes memory _bb = bytes(_b);
bytes memory _bc = bytes(_c);
bytes memory _bd = bytes(_d);
bytes memory _be = bytes(_e);
string memory abcde = new string(_ba.length + _bb.length + _bc.length + _bd.length + _be.length);
bytes memory babcde = bytes(abcde);
uint k = 0;
for (uint i = 0; i < _ba.length; i++) babcde[k++] = _ba[i];
for (uint i = 0; i < _bb.length; i++) babcde[k++] = _bb[i];
for (uint i = 0; i < _bc.length; i++) babcde[k++] = _bc[i];
for (uint i = 0; i < _bd.length; i++) babcde[k++] = _bd[i];
for (uint i = 0; i < _be.length; i++) babcde[k++] = _be[i];
return string(babcde);
}
function strConcat(string memory _a, string memory _b, string memory _c, string memory _d) internal pure returns (string memory) {
return strConcat(_a, _b, _c, _d, "");
}
function strConcat(string memory _a, string memory _b, string memory _c) internal pure returns (string memory) {
return strConcat(_a, _b, _c, "", "");
}
function strConcat(string memory _a, string memory _b) internal pure returns (string memory) {
return strConcat(_a, _b, "", "", "");
}
function uint2str(uint _i) internal pure returns (string memory _uintAsString) {
if (_i == 0) {
return "0";
}
uint j = _i;
uint len;
while (j != 0) {
len++;
j /= 10;
}
bytes memory bstr = new bytes(len);
uint k = len - 1;
while (_i != 0) {
bstr[k--] = byte(uint8(48 + _i % 10));
_i /= 10;
}
return string(bstr);
}
}
Note I changed the pragma up top. Everything looks fine to me, so I'm not sure where the issue is, aside from the fact that it's on this line: bstr[k--] = byte(uint8(48 + _i % 10));
Use bytes1 instead of byte.
The type byte has been removed. It was an alias of bytes1.
Source: https://docs.soliditylang.org/en/v0.8.3/080-breaking-changes.html#silent-changes-of-the-semantics
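With that change, the failing line in uint2str becomes:
bstr[k--] = bytes1(uint8(48 + _i % 10));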
I'm currently trying to offset bytes in Solidity to implement a simple Caesar cipher decryption. However, I can't figure out how to do this. Here is my current code, which gives a few compiler errors in Remix:
function decrypt(bytes32 data, int key) public returns (bool) {
bytes32 decryptedData = data; // `data` here is the encrypted data
// decryption
for (int i = 0; i < decryptedData.length; i++) {
decryptedData[i] = (decryptedData[i] - key) % 256;
}
// returns if sha256(decryptedData) matches some value
}
However, this gives me the following errors:
TypeError: Expression has to be an lvalue.
decryptedData[i] = (decryptedData[i] - key) % 256;
^--------------^
TypeError: Operator - not compatible with types bytes1 and int256
decryptedData[i] = (decryptedData[i] - key) % 256;
^--------------------^
TypeError: Operator % not compatible with types bytes1 and int_const 256
decryptedData[i] = (decryptedData[i] - key) % 256;
^----------------------------^
Thanks!
Like Damian Green said, I'm a bit confused about the algorithm you're trying to write, but the contract below will decrypt a Caesar-encrypted text. You should be able to modify it for your needs. (Please excuse the lazy hard-coding of ASCII values.)
Note that you can't use bytes32, as that type is treated as a special array in Solidity and is read-only. See the "index access" section of http://solidity.readthedocs.io/en/develop/types.html#fixed-size-byte-arrays
pragma solidity ^0.4.17;
contract CaesarDecryption {
function decrypt(bytes data, int key) pure public returns (bytes) {
bytes memory decryptedData = data;
for (uint i = 0; i < decryptedData.length; i++) {
decryptedData[i] = decryptByte(decryptedData[i], key);
}
return decryptedData;
}
function decryptByte(byte b, int k) pure internal returns (byte) {
uint8 ascii = uint8(b);
uint8 asciiShift;
if (ascii >= 65 && ascii <= 90)
asciiShift = 65;
else if (ascii >= 97 && ascii <=122)
asciiShift = 97;
return byte(((ascii - asciiShift - k + 26) % 26) + asciiShift);
}
}
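If you need this under Solidity 0.8.x (as in the earlier questions), byte has to become bytes1, bytes parameters need an explicit memory location, and the mixed int/uint8 arithmetic must be made explicit. A sketch of the per-byte helper under those assumptions (taking the key as uint8 and passing non-letters through unchanged are my own choices):
function decryptByte(bytes1 b, uint8 k) internal pure returns (bytes1) {
    uint8 ascii = uint8(b);
    uint8 asciiShift;
    if (ascii >= 65 && ascii <= 90) asciiShift = 65;        // 'A'..'Z'
    else if (ascii >= 97 && ascii <= 122) asciiShift = 97;  // 'a'..'z'
    else return b;                                          // leave non-letters alone
    // shift back by k, wrapping within the 26-letter alphabet
    return bytes1(((ascii - asciiShift + 26 - (k % 26)) % 26) + asciiShift);
}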
I wrote a __device__ function that uses a for loop. It works on a GTX460 card (compute capability 2.1), but not on a 9500GT (compute capability 1.1).
The function is roughly like this:
__device__ void myFuncD(float4 *myArray, float4 *result, uint index, uint foo, uint *here, uint *there)
{
uint j;
float4 myValue = myArray[index];
uint idxHere = here[foo];
uint idxThere = there[foo];
float4 temp;
for(j=idxHere;j<idxThere;j++){
temp = myArray[j];
//do things with myValue and temp, write result to *result
result->x += /* some calculations with myValue.x and temp.x */
result->y += /* some calculations with myValue.y and temp.y */
result->z += /* some calculations with myValue.z and temp.z */
}
}
__global__ void myKernelD(float4 *myArray, float4 *myResults, uint *here, uint *there)
{
uint index = blockDim.x*blockIdx.x+threadIdx.x;
float4 result = make_float4(0.0f,0.0f,0.0f,0.0f);
uint foo1, foo2, foo3, foo4;
//compute foo1, foo2, foo3, foo4 based on myArray[index]
myFuncD(myArray, &result, index, foo1, here, there);
myFuncD(myArray, &result, index, foo2, here, there);
myFuncD(myArray, &result, index, foo3, here, there);
myFuncD(myArray, &result, index, foo4, here, there);
myResults[index] = result;
}
On the GTX460, myResults has proper values, but on the 9500GT every component of its members is zero.
How can I achieve the same effect with a compute capability 1.1 device?
The user was trying to launch with too many threads per block and was getting the error "too many resources requested for launch". Decreasing the number of threads per block allowed the kernel to launch.
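A minimal sketch of what that looks like in the host code (the block size of 128 is just an example, the kernel and argument names mirror the code above, and the error check is where "too many resources requested for launch" actually gets reported):
#include <cstdio>

typedef unsigned int uint;  // matches the uint used in the kernel above

void launchMyKernelD(float4 *myArray, float4 *myResults,
                     uint *here, uint *there, uint n)
{
    // Compute-capability 1.1 parts have far fewer registers per block,
    // so a block size that works on a GTX460 may be too large here.
    uint threadsPerBlock = 128;
    uint blocks = (n + threadsPerBlock - 1) / threadsPerBlock;
    myKernelD<<<blocks, threadsPerBlock>>>(myArray, myResults, here, there);

    // Launch failures are reported via the error state, not as exceptions,
    // so check explicitly after every launch.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Kernel launch failed: %s\n", cudaGetErrorString(err));
}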