
Converting a template type into its binary representation

#include <iostream>

using namespace std;

template<class T>
void toBinary(T num) 
{
  char *  numi = reinterpret_cast<char*>(&num);

  for (int i = 1; i <= sizeof(T); i++)  
  {
      for( int j = 1 ; j <= 8; ++j )
      {
          char byte = numi[i];
          cout << ( byte & j  ? 1 :  0);    
      }
  }
  cout << endl << endl;
}

int main() 
{   
  toBinary(1); 
  std::cin.get();
}

The output is 0000000000000... Could you tell me where my mistake is?

EDIT:

#include <iostream>
#include <bitset>
#include <iomanip>
#include <climits>
#include <typeinfo>
#include <boost/format.hpp>
using namespace std;

template<class T> bitset<sizeof(T)*CHAR_BIT> toBinary(const T num) 
{
    bitset<sizeof(T)*CHAR_BIT> mybits;
    const char * const p = reinterpret_cast<const char*>(&num);
    for (int i = sizeof(T)*CHAR_BIT-1 ; i >= 0 ; --i)
        mybits.set(i, (p[i/CHAR_BIT] >> (i%CHAR_BIT)) & 1);
    return mybits;
}

template<class T> void printBinary(T num, ostream& stream = cout)
{
    stream << boost::format("%-35s %-8s %-32s\n")  %  typeid(T).name() % num % toBinary(num).to_string();
}

struct Foo{void bar(){}};

int main() 
{   
  printBinary(-8);
  printBinary(8u);
  printBinary('a');
  printBinary(8.2f);  
  printBinary("Overflow");
  printBinary(main);
  printBinary(&Foo::bar);
  printBinary(8.2);
  std::cin.get();
}
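
As a side note: for plain integral types there is no need to walk the bytes at all, because std::bitset can be constructed directly from the value (it is taken as unsigned long long, so negative numbers come out in two's complement). A minimal sketch:

#include <bitset>
#include <climits>
#include <iostream>

int main()
{
    // The value is converted to unsigned long long and the low
    // sizeof(int)*CHAR_BIT bits are kept, so -8 prints in two's complement.
    std::bitset<sizeof(int) * CHAR_BIT> bits(-8);
    std::cout << bits << '\n';   // 11111111111111111111111111111000 on a 32-bit int
}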


I guess, if I really wanted to fix this code as-is, I would do it like so:

#include <iostream>
#include <string>

using namespace std;

template<class T>
    void toBinary(const T& num) 
{
  const char* const asbytes = reinterpret_cast<const char*>(&num);

  for (const char* byte=asbytes + sizeof(T) - 1; byte>=asbytes; byte--)
  {
      for ( int bitnr = 7; bitnr>=0; bitnr-- )
      {
          cout << ( (*byte & (1<<bitnr))  ? 1 :  0);    
      }
  }
  cout << endl << endl;
}

int main() 
{   
  toBinary(1); 
  std::cin.get();
}
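
One design note on the loop above: it walks from the highest byte address down to the lowest, so the printed byte order depends on the machine. On a little-endian machine the most significant byte is printed first and toBinary(1) ends in ...0001; on a big-endian machine the bytes come out in the opposite order. A small sketch to check which one applies:

#include <iostream>

int main()
{
    unsigned int x = 1;
    // On a little-endian machine the least significant byte of x
    // sits at the lowest address, so its first byte is 1.
    bool little = *reinterpret_cast<unsigned char*>(&x) == 1;
    std::cout << (little ? "little-endian" : "big-endian") << '\n';
}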


I see two things:

  1. The loop over i should begin at 0, since array indices are zero-based.
  2. j++ should be j <<= 1, so that j steps through the single-bit masks.

Indeed,

1      = 0b00000001
1 << 1 = 0b00000010
1 << 2 = 0b00000100
...

Contrast this with what you do:

1     = 0b00000001
1 + 1 = 0b00000010
1 + 2 = 0b00000011
1 + 3 = 0b00000100

which is not what you want.

Also, the standard does not guarantee that there are 8 bits in a byte. Type char is guaranteed to be one byte, sizeof measures size in bytes, and to get the number of bits in a byte, use the CHAR_BIT macro from <climits>:

for (int j = 1; j < (1 << CHAR_BIT); j <<= 1)
{
    char byte = numi[i];
    cout << (byte & j ? 1 : 0);    
}  


byte & j does not check whether the j'th bit is set; it just checks whether any of the bits that are set in j are also set in byte.

To check for a particular bit, use (byte & (1 << j)) != 0 (j is zero-based in this case!).
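
Putting that together, a corrected version of the original function might look like this (a sketch; it prints the bytes in memory order, so the exact output depends on endianness):

#include <climits>
#include <cstddef>
#include <iostream>

template<class T>
void toBinary(T num)
{
    const char* bytes = reinterpret_cast<const char*>(&num);
    for (std::size_t i = 0; i < sizeof(T); ++i)      // bytes, starting at 0
        for (int j = CHAR_BIT - 1; j >= 0; --j)      // highest bit of each byte first
            std::cout << ((bytes[i] & (1 << j)) != 0);
    std::cout << '\n';
}

int main()
{
    toBinary(1);
}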

